data
dict |
|---|
{
"proceeding": {
"id": "12OmNyoiYVr",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwpXRXO",
"doi": "10.1109/CVPR.2017.268",
"title": "Scalable Surface Reconstruction from Point Clouds with Extreme Scale and Density Diversity",
"normalizedTitle": "Scalable Surface Reconstruction from Point Clouds with Extreme Scale and Density Diversity",
"abstract": "In this paper we present a scalable approach for robustly computing a 3D surface mesh from multi-scale multi-view stereo point clouds that can handle extreme jumps of point density (in our experiments three orders of magnitude). The backbone of our approach is a combination of octree data partitioning, local Delaunay tetrahedralization and graph cut optimization. Graph cut optimization is used twice, once to extract surface hypotheses from local Delaunay tetrahedralizations and once to merge overlapping surface hypotheses even when the local tetrahedralizations do not share the same topology. This formulation allows us to obtain a constant memory consumption per sub-problem while at the same time retaining the density independent interpolation properties of the Delaunay-based optimization. On multiple public datasets, we demonstrate that our approach is highly competitive with the state-of-the-art in terms of accuracy, completeness and outlier resilience. Further, we demonstrate the multi-scale potential of our approach by processing a newly recorded dataset with 2 billion points and a point density variation of more than four orders of magnitude - requiring less than 9GB of RAM per process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we present a scalable approach for robustly computing a 3D surface mesh from multi-scale multi-view stereo point clouds that can handle extreme jumps of point density (in our experiments three orders of magnitude). The backbone of our approach is a combination of octree data partitioning, local Delaunay tetrahedralization and graph cut optimization. Graph cut optimization is used twice, once to extract surface hypotheses from local Delaunay tetrahedralizations and once to merge overlapping surface hypotheses even when the local tetrahedralizations do not share the same topology. This formulation allows us to obtain a constant memory consumption per sub-problem while at the same time retaining the density independent interpolation properties of the Delaunay-based optimization. On multiple public datasets, we demonstrate that our approach is highly competitive with the state-of-the-art in terms of accuracy, completeness and outlier resilience. Further, we demonstrate the multi-scale potential of our approach by processing a newly recorded dataset with 2 billion points and a point density variation of more than four orders of magnitude - requiring less than 9GB of RAM per process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we present a scalable approach for robustly computing a 3D surface mesh from multi-scale multi-view stereo point clouds that can handle extreme jumps of point density (in our experiments three orders of magnitude). The backbone of our approach is a combination of octree data partitioning, local Delaunay tetrahedralization and graph cut optimization. Graph cut optimization is used twice, once to extract surface hypotheses from local Delaunay tetrahedralizations and once to merge overlapping surface hypotheses even when the local tetrahedralizations do not share the same topology. This formulation allows us to obtain a constant memory consumption per sub-problem while at the same time retaining the density independent interpolation properties of the Delaunay-based optimization. On multiple public datasets, we demonstrate that our approach is highly competitive with the state-of-the-art in terms of accuracy, completeness and outlier resilience. Further, we demonstrate the multi-scale potential of our approach by processing a newly recorded dataset with 2 billion points and a point density variation of more than four orders of magnitude - requiring less than 9GB of RAM per process.",
"fno": "0457c501",
"keywords": [
"Computer Graphics",
"Image Reconstruction",
"Interpolation",
"Mesh Generation",
"Octrees",
"Optimisation",
"Stereo Image Processing",
"Scalable Surface Reconstruction",
"Density Diversity",
"Multiscale Multiview Stereo Point Clouds",
"Octree Data Partitioning",
"Graph Cut Optimization",
"Density Independent Interpolation Properties",
"3 D Surface Mesh",
"Delaunay Tetrahedralization",
"Point Density",
"Three Dimensional Displays",
"Optimization",
"Octrees",
"Surface Reconstruction",
"Image Reconstruction",
"Scalability",
"Image Edge Detection"
],
"authors": [
{
"affiliation": null,
"fullName": "Christian Mostegel",
"givenName": "Christian",
"surname": "Mostegel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rudolf Prettenthaler",
"givenName": "Rudolf",
"surname": "Prettenthaler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Friedrich Fraundorfer",
"givenName": "Friedrich",
"surname": "Fraundorfer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Horst Bischof",
"givenName": "Horst",
"surname": "Bischof",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "2501-2510",
"year": "2017",
"issn": "1063-6919",
"isbn": "978-1-5386-0457-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0457c492",
"articleId": "12OmNBBQZt3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0457c511",
"articleId": "12OmNxj2348",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2000/6478/0/64780067",
"title": "New Techniques for Topologically Correct Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780067/12OmNAtaS1P",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a692",
"title": "3D Surface Extraction Using Incremental Tetrahedra Carving",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a692/12OmNC3XhgZ",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720a157",
"title": "Incremental Division of Very Large Point Clouds for Scalable 3D Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a157/12OmNvT2p0I",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761263",
"title": "Topologically correct surface reconstruction using alpha shapes and relations to ball-pivoting",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761263/12OmNwErpUf",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1988/0878/0/00028293",
"title": "Constrained Delaunay triangulation for multiresolution surface description",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1988/00028293/12OmNxR5USd",
"parentPublication": {
"id": "proceedings/icpr/1988/0878/0",
"title": "9th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460309",
"title": "Genus refinement of a manifold surface reconstructed by sculpting the 3d-Delaunay triangulation of Structure-from-Motion points",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460309/12OmNypIYwx",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/06/08630046",
"title": "Hierarchical Surface Prediction",
"doi": null,
"abstractUrl": "/journal/tp/2020/06/08630046/17D45Xcttk2",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a338",
"title": "Distributed Surface Reconstruction from Point Cloud for City-Scale Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a338/1ezRC63EFxK",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800a967",
"title": "SSRNet: Scalable 3D Surface Reconstruction Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800a967/1m3nKc80MlG",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b432",
"title": "Efficiently Distributed Watertight Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b432/1zWE7llklAA",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNs5rkLU",
"title": "2001 International Conferences on Info-tech and Info-net. Proceedings",
"acronym": "icii",
"groupId": "1002214",
"volume": "3",
"displayVolume": "3",
"year": "2001",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNylboL1",
"doi": "10.1109/ICII.2001.983095",
"title": "Displaying IFS attractor based on fixed point",
"normalizedTitle": "Displaying IFS attractor based on fixed point",
"abstract": "This paper first introduces iterated function system (IFS) theory briefly, and points out the defects of random algorithm for computing IFS attractor. With the property of similarity between different regions of IFS attractor, this paper presents the principle and method of gradually displaying IFS attractor from one fixed point of an invertible affine transformation. Experimental results show that this method is effective and resolves some problems of random algorithm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper first introduces iterated function system (IFS) theory briefly, and points out the defects of random algorithm for computing IFS attractor. With the property of similarity between different regions of IFS attractor, this paper presents the principle and method of gradually displaying IFS attractor from one fixed point of an invertible affine transformation. Experimental results show that this method is effective and resolves some problems of random algorithm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper first introduces iterated function system (IFS) theory briefly, and points out the defects of random algorithm for computing IFS attractor. With the property of similarity between different regions of IFS attractor, this paper presents the principle and method of gradually displaying IFS attractor from one fixed point of an invertible affine transformation. Experimental results show that this method is effective and resolves some problems of random algorithm.",
"fno": "00983095",
"keywords": [
"Computational Geometry",
"Iterated Function System Theory",
"Random Algorithm",
"Euclidean Geometry",
"Fractals",
"Shape",
"Computer Displays",
"Signal Processing Algorithms",
"Geometry",
"Computational Intelligence Society",
"Wheels",
"Buildings",
"Clouds",
"Veins"
],
"authors": [
{
"affiliation": "Sch. of Inf. Eng., Univ. of Sci. & Technol., Beijing, China",
"fullName": "Zhang Yi-Shun",
"givenName": null,
"surname": "Zhang Yi-Shun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yang Yang",
"givenName": null,
"surname": "Yang Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icii",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2001-01-01T00:00:00",
"pubType": "proceedings",
"pages": "433,434,435,436,437",
"year": "2001",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00983094",
"articleId": "12OmNzBwGsG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00983096",
"articleId": "12OmNCvLXYl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/soac/1990/2031/0/00082189",
"title": "PC based software for real time decoding and partial encoding IFS codes",
"doi": null,
"abstractUrl": "/proceedings-article/soac/1990/00082189/12OmNAHmOvw",
"parentPublication": {
"id": "proceedings/soac/1990/2031/0",
"title": "Proceedings of the 1990 Symposium on Applied Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsa/2009/3701/0/3701a132",
"title": "IFS Matlab Generator: A Computer Tool for Displaying IFS Fractals",
"doi": null,
"abstractUrl": "/proceedings-article/iccsa/2009/3701a132/12OmNBDyA8H",
"parentPublication": {
"id": "proceedings/iccsa/2009/3701/0",
"title": "2009 International Conference on Computational Science and Its Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifcsta/2009/3930/1/3930a294",
"title": "Study on Fractal Images Construction with Topology Invariance IFS Attractor",
"doi": null,
"abstractUrl": "/proceedings-article/ifcsta/2009/3930a294/12OmNBiygBK",
"parentPublication": {
"id": "proceedings/ifcsta/2009/3930/3",
"title": "Computer Science-Technology and Applications, International Forum on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6265/1/00576449",
"title": "Adaptive Delaunay triangulation for attractor image coding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576449/12OmNxGja3X",
"parentPublication": {
"id": "proceedings/icpr/1994/6265/1",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy2agRS",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"acronym": "cad-graphics",
"groupId": "1001488",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzgNXQG",
"doi": "10.1109/CADGraphics.2013.12",
"title": "Robust Surface Consolidation of Scanned Thick Point Clouds",
"normalizedTitle": "Robust Surface Consolidation of Scanned Thick Point Clouds",
"abstract": "This paper proposes a consolidation method for scanned point clouds that are usually corrupted by noises, outliers, and thickness. At the beginning, we construct neighborhood of a point based on shared nearest neighbor relationship. Then, the points with few number of neighbors are regarded as outliers and removed. After that, we propose a feature-aware projection operator to thin the thick point clouds by considering spatial distances, normal diversifications, and the squash directions of thick point clouds. Experiment results of scanned point clouds show that our method can consolidate the thick point clouds while preserving sharp features and geometry details.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a consolidation method for scanned point clouds that are usually corrupted by noises, outliers, and thickness. At the beginning, we construct neighborhood of a point based on shared nearest neighbor relationship. Then, the points with few number of neighbors are regarded as outliers and removed. After that, we propose a feature-aware projection operator to thin the thick point clouds by considering spatial distances, normal diversifications, and the squash directions of thick point clouds. Experiment results of scanned point clouds show that our method can consolidate the thick point clouds while preserving sharp features and geometry details.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a consolidation method for scanned point clouds that are usually corrupted by noises, outliers, and thickness. At the beginning, we construct neighborhood of a point based on shared nearest neighbor relationship. Then, the points with few number of neighbors are regarded as outliers and removed. After that, we propose a feature-aware projection operator to thin the thick point clouds by considering spatial distances, normal diversifications, and the squash directions of thick point clouds. Experiment results of scanned point clouds show that our method can consolidate the thick point clouds while preserving sharp features and geometry details.",
"fno": "06814975",
"keywords": [
"Noise",
"Surface Reconstruction",
"Solid Modeling",
"Robustness",
"Educational Institutions",
"Geometry",
"Noise Measurement",
"Feature Preserving Reconstruction",
"Consolidation",
"Thick Point Clouds"
],
"authors": [
{
"affiliation": null,
"fullName": "Xiaochao Wang",
"givenName": "Xiaochao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiuping Liu",
"givenName": "Xiuping",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hong Qin",
"givenName": "Hong",
"surname": "Qin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cad-graphics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-11-01T00:00:00",
"pubType": "proceedings",
"pages": "38-43",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2576-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06814974",
"articleId": "12OmNAle6tC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06814976",
"articleId": "12OmNyrZLDi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391c147",
"title": "Procedural Editing of 3D Building Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391c147/12OmNAlvHzS",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032c372",
"title": "PolyFit: Polygonal Surface Reconstruction from Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c372/12OmNBRKwBF",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130375",
"title": "Consolidation of multiple depth maps",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130375/12OmNBtUdHC",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477626",
"title": "IPDC: Iterative part-based dense correspondence between point clouds",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477626/12OmNrJAdYu",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a083",
"title": "Robust Feature-Preserving Denoising of 3D Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a083/12OmNyRxFIQ",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2012/4789/0/4789a227",
"title": "A Rapid Surface Remodeling of Human Body Based on Scanned Data",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2012/4789a227/12OmNzWx06g",
"parentPublication": {
"id": "proceedings/iccis/2012/4789/0",
"title": "2012 Fourth International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2012/03/mcg2012030070",
"title": "Iterative Consolidation of Unorganized Point Clouds",
"doi": null,
"abstractUrl": "/magazine/cg/2012/03/mcg2012030070/13rRUwInvDh",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/culture-and-computing/2017/1135/0/08227334",
"title": "Improving Transparent Visualization of Large-Scale Laser-Scanned Point Clouds by Using Poisson Disk Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/culture-and-computing/2017/08227334/17D45XERmmb",
"parentPublication": {
"id": "proceedings/culture-and-computing/2017/1135/0",
"title": "2017 International Conference on Culture and Computing (Culture and Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8920",
"title": "Surface Representation for Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8920/1H1jmGGv0eQ",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci*cc/2019/1419/0/09146055",
"title": "Multiscale Point Cloud Optimization for SfM Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2019/09146055/1lFJcIzkAg0",
"parentPublication": {
"id": "proceedings/icci*cc/2019/1419/0",
"title": "2019 IEEE 18th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwcl7Jy",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"acronym": "icinis",
"groupId": "1002524",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzl3WTy",
"doi": "10.1109/ICINIS.2010.16",
"title": "Reconstruction of Broken Blade Geometry Model Based on Reverse Engineering",
"normalizedTitle": "Reconstruction of Broken Blade Geometry Model Based on Reverse Engineering",
"abstract": "To improve the efficiency of blade repair, a reverse engineering-based geometry reconstruction method is developed for the normal model creation of a broken blade. The key techniques involved are including the digital measurement of broken blade, point cloud pre-processing, three dimensional models and so on. After the point cloud pre-processing, the segmentation extracts the damaged region from point cloud by a 4D Shepard surface based curvature estimation method. Bezier curve is used to construct the extracted boundary points. Bezier surface is reconstructed by the extracted points and is used to broken space subdivision. At last, a geometry model is built for the preparation of the laser forming repair.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To improve the efficiency of blade repair, a reverse engineering-based geometry reconstruction method is developed for the normal model creation of a broken blade. The key techniques involved are including the digital measurement of broken blade, point cloud pre-processing, three dimensional models and so on. After the point cloud pre-processing, the segmentation extracts the damaged region from point cloud by a 4D Shepard surface based curvature estimation method. Bezier curve is used to construct the extracted boundary points. Bezier surface is reconstructed by the extracted points and is used to broken space subdivision. At last, a geometry model is built for the preparation of the laser forming repair.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To improve the efficiency of blade repair, a reverse engineering-based geometry reconstruction method is developed for the normal model creation of a broken blade. The key techniques involved are including the digital measurement of broken blade, point cloud pre-processing, three dimensional models and so on. After the point cloud pre-processing, the segmentation extracts the damaged region from point cloud by a 4D Shepard surface based curvature estimation method. Bezier curve is used to construct the extracted boundary points. Bezier surface is reconstructed by the extracted points and is used to broken space subdivision. At last, a geometry model is built for the preparation of the laser forming repair.",
"fno": "4249a680",
"keywords": [
"Reverse Engineering",
"Laser Forming Repair",
"Broken Blade",
"Boundary Extraction",
"Geometry Reconstruction"
],
"authors": [
{
"affiliation": null,
"fullName": "Jinhua Li",
"givenName": "Jinhua",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Fangping Yao",
"givenName": "Fangping",
"surname": "Yao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yongxian Liu",
"givenName": "Yongxian",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yongguo Wu",
"givenName": "Yongguo",
"surname": "Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icinis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-11-01T00:00:00",
"pubType": "proceedings",
"pages": "680-682",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4249-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4249a676",
"articleId": "12OmNrMZpof",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4249a683",
"articleId": "12OmNB0nWf6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icse/2011/0445/0/04451010",
"title": "ReAssert: a tool for repairing broken unit tests",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2011/04451010/12OmNAnuTkW",
"parentPublication": {
"id": "proceedings/icse/2011/0445/0",
"title": "2011 33rd International Conference on Software Engineering (ICSE 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gmp/2004/2078/0/20780311",
"title": "An Engineering Rules based Parameterization Approach for Turbine Blade Reverse Engineering",
"doi": null,
"abstractUrl": "/proceedings-article/gmp/2004/20780311/12OmNBpVQ9t",
"parentPublication": {
"id": "proceedings/gmp/2004/2078/0",
"title": "Geometric Modeling and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icic/2010/4047/3/4047c050",
"title": "Reconstruction of steam turbine blade twisted based on NURBS surface",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2010/4047c050/12OmNyKa67w",
"parentPublication": {
"id": "proceedings/icic/2010/4047/3",
"title": "2010 Third International Conference on Information and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tase/2009/3757/0/3757a165",
"title": "Program Repair as Sound Optimization of Broken Programs",
"doi": null,
"abstractUrl": "/proceedings-article/tase/2009/3757a165/12OmNyvY9A3",
"parentPublication": {
"id": "proceedings/tase/2009/3757/0",
"title": "2009 Third IEEE International Symposium on Theoretical Aspects of Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/2/3962c390",
"title": "High-Precision Modeling of Turbine Blade from Cross-Section Data",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962c390/12OmNzvz6Dr",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/2",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10091218",
"title": "Learning by Restoring Broken 3D Geometry",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10091218/1M2IGOVM2M8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45XDIXTB",
"doi": "10.1109/CVPR.2018.00295",
"title": "PU-Net: Point Cloud Upsampling Network",
"normalizedTitle": "PU-Net: Point Cloud Upsampling Network",
"abstract": "Learning and analyzing 3D point clouds with deep networks is challenging due to the sparseness and irregularity of the data. In this paper, we present a data-driven point cloud upsampling technique. The key idea is to learn multi-level features per point and expand the point set via a multi-branch convolution unit implicitly in feature space. The expanded feature is then split to a multitude of features, which are then reconstructed to an upsampled point set. Our network is applied at a patch-level, with a joint loss function that encourages the upsampled points to remain on the underlying surface with a uniform distribution. We conduct various experiments using synthesis and scan data to evaluate our method and demonstrate its superiority over some baseline methods and an optimization-based method. Results show that our upsampled points have better uniformity and are located closer to the underlying surfaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Learning and analyzing 3D point clouds with deep networks is challenging due to the sparseness and irregularity of the data. In this paper, we present a data-driven point cloud upsampling technique. The key idea is to learn multi-level features per point and expand the point set via a multi-branch convolution unit implicitly in feature space. The expanded feature is then split to a multitude of features, which are then reconstructed to an upsampled point set. Our network is applied at a patch-level, with a joint loss function that encourages the upsampled points to remain on the underlying surface with a uniform distribution. We conduct various experiments using synthesis and scan data to evaluate our method and demonstrate its superiority over some baseline methods and an optimization-based method. Results show that our upsampled points have better uniformity and are located closer to the underlying surfaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Learning and analyzing 3D point clouds with deep networks is challenging due to the sparseness and irregularity of the data. In this paper, we present a data-driven point cloud upsampling technique. The key idea is to learn multi-level features per point and expand the point set via a multi-branch convolution unit implicitly in feature space. The expanded feature is then split to a multitude of features, which are then reconstructed to an upsampled point set. Our network is applied at a patch-level, with a joint loss function that encourages the upsampled points to remain on the underlying surface with a uniform distribution. We conduct various experiments using synthesis and scan data to evaluate our method and demonstrate its superiority over some baseline methods and an optimization-based method. Results show that our upsampled points have better uniformity and are located closer to the underlying surfaces.",
"fno": "642000c790",
"keywords": [
"Feature Extraction",
"Image Reconstruction",
"Image Representation",
"Learning Artificial Intelligence",
"Point Cloud Upsampling Network",
"Deep Networks",
"Data Driven Point Cloud Upsampling Technique",
"Multilevel Features",
"Multibranch Convolution Unit",
"Feature Space",
"Expanded Feature",
"Upsampled Point",
"Three Dimensional Displays",
"Feature Extraction",
"Convolution",
"Geometry",
"Training",
"Surface Reconstruction",
"Image Reconstruction"
],
"authors": [
{
"affiliation": null,
"fullName": "Lequan Yu",
"givenName": "Lequan",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xianzhi Li",
"givenName": "Xianzhi",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chi-Wing Fu",
"givenName": "Chi-Wing",
"surname": "Fu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Daniel Cohen-Or",
"givenName": "Daniel",
"surname": "Cohen-Or",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Pheng-Ann Heng",
"givenName": "Pheng-Ann",
"surname": "Heng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2790-2799",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000c780",
"articleId": "17D45WZZ7FU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000c800",
"articleId": "17D45VObpOU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "mags/cg/2012/03/mcg2012030070",
"title": "Iterative Consolidation of Unorganized Point Clouds",
"doi": null,
"abstractUrl": "/magazine/cg/2012/03/mcg2012030070/13rRUwInvDh",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200q6188",
"title": "PU-EVA: An Edge-Vector based Approximation Solution for Flexible-scale Point Cloud Upsampling",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200q6188/1BmF0g5VGX6",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09850404",
"title": "PU-Flow: a Point Cloud Upsampling Network with Normalizing Flows",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09850404/1Fz4SEQnoiY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859662",
"title": "“Zero-Shot” Point Cloud Upsampling",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859662/1G9EpZI4WbK",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c323",
"title": "Density-preserving Deep Point Cloud Compression",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c323/1H0ODd9D5uM",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10044160",
"title": "Flattening-Net: Deep Regular 2D Representation for 3D Point Cloud Analysis",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10044160/1KL6TgYfsLC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h202",
"title": "PU-GAN: A Point Cloud Upsampling Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h202/1hVlmofrMd2",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/09/09351772",
"title": "Meta-PU: An Arbitrary-Scale Upsampling Network for Point Cloud",
"doi": null,
"abstractUrl": "/journal/tg/2022/09/09351772/1r50ATC6YnK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1678",
"title": "PU-GCN: Point Cloud Upsampling using Graph Convolutional Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1678/1yeJFgHQHmM",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a344",
"title": "Point Cloud Upsampling via Disentangled Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a344/1yeJtTxqpTG",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqH9hnp",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBKW9vz",
"doi": "10.1109/CVPR.2016.448",
"title": "Rolling Shutter Camera Relative Pose: Generalized Epipolar Geometry",
"normalizedTitle": "Rolling Shutter Camera Relative Pose: Generalized Epipolar Geometry",
"abstract": "The vast majority of modern consumer-grade cameras employ a rolling shutter mechanism. In dynamic geometric computer vision applications such as visual SLAM, the so-called rolling shutter effect therefore needs to be properly taken into account. A dedicated relative pose solver appears to be the first problem to solve, as it is of eminent importance to bootstrap any derivation of multi-view geometry. However, despite its significance, it has received inadequate attention to date. This paper presents a detailed investigation of the geometry of the rolling shutter relative pose problem. We introduce the rolling shutter essential matrix, and establish its link to existing models such as the push-broom cameras, summarized in a clean hierarchy of multi-perspective cameras. The generalization of well-established concepts from epipolar geometry is completed by a definition of the Sampson distance in the rolling shutter case. The work is concluded with a careful investigation of the introduced epipolar geometry for rolling shutter cameras on several dedicated benchmarks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The vast majority of modern consumer-grade cameras employ a rolling shutter mechanism. In dynamic geometric computer vision applications such as visual SLAM, the so-called rolling shutter effect therefore needs to be properly taken into account. A dedicated relative pose solver appears to be the first problem to solve, as it is of eminent importance to bootstrap any derivation of multi-view geometry. However, despite its significance, it has received inadequate attention to date. This paper presents a detailed investigation of the geometry of the rolling shutter relative pose problem. We introduce the rolling shutter essential matrix, and establish its link to existing models such as the push-broom cameras, summarized in a clean hierarchy of multi-perspective cameras. The generalization of well-established concepts from epipolar geometry is completed by a definition of the Sampson distance in the rolling shutter case. The work is concluded with a careful investigation of the introduced epipolar geometry for rolling shutter cameras on several dedicated benchmarks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The vast majority of modern consumer-grade cameras employ a rolling shutter mechanism. In dynamic geometric computer vision applications such as visual SLAM, the so-called rolling shutter effect therefore needs to be properly taken into account. A dedicated relative pose solver appears to be the first problem to solve, as it is of eminent importance to bootstrap any derivation of multi-view geometry. However, despite its significance, it has received inadequate attention to date. This paper presents a detailed investigation of the geometry of the rolling shutter relative pose problem. We introduce the rolling shutter essential matrix, and establish its link to existing models such as the push-broom cameras, summarized in a clean hierarchy of multi-perspective cameras. The generalization of well-established concepts from epipolar geometry is completed by a definition of the Sampson distance in the rolling shutter case. The work is concluded with a careful investigation of the introduced epipolar geometry for rolling shutter cameras on several dedicated benchmarks.",
"fno": "07780817",
"keywords": [
"Cameras",
"Geometry",
"Computer Vision",
"Three Dimensional Displays",
"Context",
"Trajectory",
"Standards"
],
"authors": [
{
"affiliation": null,
"fullName": "Yuchao Dai",
"givenName": "Yuchao",
"surname": "Dai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hongdong Li",
"givenName": "Hongdong",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Laurent Kneip",
"givenName": "Laurent",
"surname": "Kneip",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4132-4140",
"year": "2016",
"issn": "1063-6919",
"isbn": "978-1-4673-8851-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07780816",
"articleId": "12OmNyOq53c",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07780818",
"articleId": "12OmNButpZF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2012/1226/0/181P2A31",
"title": "Rolling shutter bundle adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/181P2A31/12OmNAsk4zp",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a882",
"title": "Rolling Shutter Correction in Manhattan World",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a882/12OmNCfSqHP",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840a465",
"title": "Rolling Shutter Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a465/12OmNqFrGvu",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2001/1143/1/00937564",
"title": "Cheirality in epipolar geometry",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2001/00937564/12OmNz6iOGN",
"parentPublication": {
"id": "proceedings/iccv/2001/1143/1",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459408",
"title": "Structure and kinematics triangulation with a rolling shutter stereo rig",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459408/12OmNzBwGtZ",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000e824",
"title": "Rolling Shutter and Radial Distortion are Features for High Frame Rate Multi-camera Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000e824/17D45WODaoT",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/06/08621045",
"title": "Rolling Shutter Camera Absolute Pose",
"doi": null,
"abstractUrl": "/journal/tp/2020/06/08621045/17D45XERmmI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200e521",
"title": "SUNet: Symmetric Undistortion Network for Rolling Shutter Correction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200e521/1BmL5Z4I1wY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09926197",
"title": "Rolling Shutter Inversion: Bring Rolling Shutter Images to High Framerate Global Shutter Video",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09926197/1HGJ3Pb5VzW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c502",
"title": "From Two Rolling Shutters to One Global Shutter",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c502/1m3nX4WDZss",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCfSqHP",
"doi": "10.1109/ICCV.2017.101",
"title": "Rolling Shutter Correction in Manhattan World",
"normalizedTitle": "Rolling Shutter Correction in Manhattan World",
"abstract": "A vast majority of consumer cameras operate the rolling shutter mechanism, which often produces distorted images due to inter-row delay while capturing an image. Recent methods for monocular rolling shutter compensation utilize blur kernel, straightness of line segments, as well as angle and length preservation. However, they do not incorporate scene geometry explicitly for rolling shutter correction, therefore, information about the 3D scene geometry is often distorted by the correction process. In this paper we propose a novel method which leverages geometric properties of the scene-in particular vanishing directions-to estimate the camera motion during rolling shutter exposure from a single distorted image. The proposed method jointly estimates the orthogonal vanishing directions and the rolling shutter camera motion. We performed extensive experiments on synthetic and real datasets which demonstrate the benefits of our approach both in terms of qualitative and quantitative results (in terms of a geometric structure fitting) as well as with respect to computation time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A vast majority of consumer cameras operate the rolling shutter mechanism, which often produces distorted images due to inter-row delay while capturing an image. Recent methods for monocular rolling shutter compensation utilize blur kernel, straightness of line segments, as well as angle and length preservation. However, they do not incorporate scene geometry explicitly for rolling shutter correction, therefore, information about the 3D scene geometry is often distorted by the correction process. In this paper we propose a novel method which leverages geometric properties of the scene-in particular vanishing directions-to estimate the camera motion during rolling shutter exposure from a single distorted image. The proposed method jointly estimates the orthogonal vanishing directions and the rolling shutter camera motion. We performed extensive experiments on synthetic and real datasets which demonstrate the benefits of our approach both in terms of qualitative and quantitative results (in terms of a geometric structure fitting) as well as with respect to computation time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A vast majority of consumer cameras operate the rolling shutter mechanism, which often produces distorted images due to inter-row delay while capturing an image. Recent methods for monocular rolling shutter compensation utilize blur kernel, straightness of line segments, as well as angle and length preservation. However, they do not incorporate scene geometry explicitly for rolling shutter correction, therefore, information about the 3D scene geometry is often distorted by the correction process. In this paper we propose a novel method which leverages geometric properties of the scene-in particular vanishing directions-to estimate the camera motion during rolling shutter exposure from a single distorted image. The proposed method jointly estimates the orthogonal vanishing directions and the rolling shutter camera motion. We performed extensive experiments on synthetic and real datasets which demonstrate the benefits of our approach both in terms of qualitative and quantitative results (in terms of a geometric structure fitting) as well as with respect to computation time.",
"fno": "1032a882",
"keywords": [
"Computational Geometry",
"Image Motion Analysis",
"Image Resolution",
"Image Restoration",
"Image Segmentation",
"Image Sensors",
"Motion Estimation",
"Rolling Shutter Correction",
"Consumer Cameras",
"Rolling Shutter Mechanism",
"Distorted Images",
"Inter Row Delay",
"Length Preservation",
"3 D Scene Geometry",
"Correction Process",
"Shutter Exposure",
"Single Distorted Image",
"Rolling Shutter Camera Motion",
"Monocular Rolling Shutter Compensation",
"Vanishing Directions",
"Blur Kernel",
"Orthogonal Vanishing Directions",
"Cameras",
"Geometry",
"Estimation",
"Sensors",
"Image Segmentation",
"Three Dimensional Displays",
"Distortion"
],
"authors": [
{
"affiliation": null,
"fullName": "Pulak Purkait",
"givenName": "Pulak",
"surname": "Purkait",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Christopher Zach",
"givenName": "Christopher",
"surname": "Zach",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ales Leonardis",
"givenName": "Ales",
"surname": "Leonardis",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "882-890",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032a873",
"articleId": "12OmNxFsmpn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032a891",
"articleId": "12OmNwp74DR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032a948",
"title": "Rolling-Shutter-Aware Differential SfM and Image Rectification",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a948/12OmNC2OSNC",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601a903",
"title": "Minimal Solvers for Monocular Rolling Shutter Compensation Under Ackermann Motion",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601a903/12OmNwDACeB",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926731",
"title": "Temporally Coded Illumination for Rolling Shutter Motion De-blurring",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926731/12OmNxd4tA0",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459408",
"title": "Structure and kinematics triangulation with a rolling shutter stereo rig",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459408/12OmNzBwGtZ",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/10/07748513",
"title": "Image Registration and Change Detection under Rolling Shutter Motion Blur",
"doi": null,
"abstractUrl": "/journal/tp/2017/10/07748513/13rRUynHukw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a636",
"title": "Occlusion-Aware Rolling Shutter Rectification of 3D Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a636/17D45W9KVJx",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000e824",
"title": "Rolling Shutter and Radial Distortion are Features for High Frame Rate Multi-camera Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000e824/17D45WODaoT",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200e521",
"title": "SUNet: Symmetric Undistortion Network for Rolling Shutter Correction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200e521/1BmL5Z4I1wY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600r7764",
"title": "Learning Adaptive Warping for RealWorld Rolling Shutter Correction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600r7764/1H0LjGTa97a",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c502",
"title": "From Two Rolling Shutters to One Global Shutter",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c502/1m3nX4WDZss",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyFCvPo",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqFrGvu",
"doi": "10.1109/ICCV.2013.64",
"title": "Rolling Shutter Stereo",
"normalizedTitle": "Rolling Shutter Stereo",
"abstract": "A huge fraction of cameras used nowadays is based on CMOS sensors with a rolling shutter that exposes the image line by line. For dynamic scenes/cameras this introduces undesired effects like stretch, shear and wobble. It has been shown earlier that rotational shake induced rolling shutter effects in hand-held cell phone capture can be compensated based on an estimate of the camera rotation. In contrast, we analyse the case of significant camera motion, e.g.\\ where a bypassing street level capture vehicle uses a rolling shutter camera in a 3D reconstruction framework. The introduced error is depth dependent and cannot be compensated based on camera motion/rotation alone, invalidating also rectification for stereo camera systems. On top, significant lens distortion as often present in wide angle cameras intertwines with rolling shutter effects as it changes the time at which a certain 3D point is seen. We show that naive 3D reconstructions (assuming global shutter) will deliver biased geometry already for very mild assumptions on vehicle speed and resolution. We then develop rolling shutter dense multiview stereo algorithms that solve for time of exposure and depth at the same time, even in the presence of lens distortion and perform an evaluation on ground truth laser scan models as well as on real street-level data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A huge fraction of cameras used nowadays is based on CMOS sensors with a rolling shutter that exposes the image line by line. For dynamic scenes/cameras this introduces undesired effects like stretch, shear and wobble. It has been shown earlier that rotational shake induced rolling shutter effects in hand-held cell phone capture can be compensated based on an estimate of the camera rotation. In contrast, we analyse the case of significant camera motion, e.g.\\ where a bypassing street level capture vehicle uses a rolling shutter camera in a 3D reconstruction framework. The introduced error is depth dependent and cannot be compensated based on camera motion/rotation alone, invalidating also rectification for stereo camera systems. On top, significant lens distortion as often present in wide angle cameras intertwines with rolling shutter effects as it changes the time at which a certain 3D point is seen. We show that naive 3D reconstructions (assuming global shutter) will deliver biased geometry already for very mild assumptions on vehicle speed and resolution. We then develop rolling shutter dense multiview stereo algorithms that solve for time of exposure and depth at the same time, even in the presence of lens distortion and perform an evaluation on ground truth laser scan models as well as on real street-level data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A huge fraction of cameras used nowadays is based on CMOS sensors with a rolling shutter that exposes the image line by line. For dynamic scenes/cameras this introduces undesired effects like stretch, shear and wobble. It has been shown earlier that rotational shake induced rolling shutter effects in hand-held cell phone capture can be compensated based on an estimate of the camera rotation. In contrast, we analyse the case of significant camera motion, e.g.\\ where a bypassing street level capture vehicle uses a rolling shutter camera in a 3D reconstruction framework. The introduced error is depth dependent and cannot be compensated based on camera motion/rotation alone, invalidating also rectification for stereo camera systems. On top, significant lens distortion as often present in wide angle cameras intertwines with rolling shutter effects as it changes the time at which a certain 3D point is seen. We show that naive 3D reconstructions (assuming global shutter) will deliver biased geometry already for very mild assumptions on vehicle speed and resolution. We then develop rolling shutter dense multiview stereo algorithms that solve for time of exposure and depth at the same time, even in the presence of lens distortion and perform an evaluation on ground truth laser scan models as well as on real street-level data.",
"fno": "2840a465",
"keywords": [
"Rolling Shutter",
"Stereo"
],
"authors": [
{
"affiliation": null,
"fullName": "Olivier Saurer",
"givenName": "Olivier",
"surname": "Saurer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kevin Koser",
"givenName": "Kevin",
"surname": "Koser",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jean-Yves Bouguet",
"givenName": "Jean-Yves",
"surname": "Bouguet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Marc Pollefeys",
"givenName": "Marc",
"surname": "Pollefeys",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "465-472",
"year": "2013",
"issn": "1550-5499",
"isbn": "978-1-4799-2840-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2840a457",
"articleId": "12OmNAnMuw2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2840a473",
"articleId": "12OmNrYlmPh",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2015/6964/0/07298760",
"title": "Rolling shutter motion deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298760/12OmNAkWvdx",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a882",
"title": "Rolling Shutter Correction in Manhattan World",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a882/12OmNCfSqHP",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989b360",
"title": "Rolling Shutter Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989b360/12OmNwI8cdI",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07298842",
"title": "R6P - Rolling shutter absolute pose problem",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298842/12OmNxeM47F",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2012/1662/0/06215213",
"title": "Calibration-free rolling shutter removal",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2012/06215213/12OmNyRg4Ak",
"parentPublication": {
"id": "proceedings/iccp/2012/1662/0",
"title": "2012 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459408",
"title": "Structure and kinematics triangulation with a rolling shutter stereo rig",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459408/12OmNzBwGtZ",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/06/08621045",
"title": "Rolling Shutter Camera Absolute Pose",
"doi": null,
"abstractUrl": "/journal/tp/2020/06/08621045/17D45XERmmI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200e521",
"title": "SUNet: Symmetric Undistortion Network for Rolling Shutter Correction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200e521/1BmL5Z4I1wY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09926197",
"title": "Rolling Shutter Inversion: Bring Rolling Shutter Images to High Framerate Global Shutter Video",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09926197/1HGJ3Pb5VzW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c502",
"title": "From Two Rolling Shutters to One Global Shutter",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c502/1m3nX4WDZss",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBDyAaZ",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrMHObK",
"doi": "10.1109/ICCV.2015.261",
"title": "Dense Continuous-Time Tracking and Mapping with Rolling Shutter RGB-D Cameras",
"normalizedTitle": "Dense Continuous-Time Tracking and Mapping with Rolling Shutter RGB-D Cameras",
"abstract": "We propose a dense continuous-time tracking and mapping method for RGB-D cameras. We parametrize the camera trajectory using continuous B-splines and optimize the trajectory through dense, direct image alignment. Our method also directly models rolling shutter in both RGB and depth images within the optimization, which improves tracking and reconstruction quality for low-cost CMOS sensors. Using a continuous trajectory representation has a number of advantages over a discrete-time representation (e.g. camera poses at the frame interval). With splines, less variables need to be optimized than with a discrete representation, since the trajectory can be represented with fewer control points than frames. Splines also naturally include smoothness constraints on derivatives of the trajectory estimate. Finally, the continuous trajectory representation allows to compensate for rolling shutter effects, since a pose estimate is available at any exposure time of an image. Our approach demonstrates superior quality in tracking and reconstruction compared to approaches with discrete-time or global shutter assumptions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a dense continuous-time tracking and mapping method for RGB-D cameras. We parametrize the camera trajectory using continuous B-splines and optimize the trajectory through dense, direct image alignment. Our method also directly models rolling shutter in both RGB and depth images within the optimization, which improves tracking and reconstruction quality for low-cost CMOS sensors. Using a continuous trajectory representation has a number of advantages over a discrete-time representation (e.g. camera poses at the frame interval). With splines, less variables need to be optimized than with a discrete representation, since the trajectory can be represented with fewer control points than frames. Splines also naturally include smoothness constraints on derivatives of the trajectory estimate. Finally, the continuous trajectory representation allows to compensate for rolling shutter effects, since a pose estimate is available at any exposure time of an image. Our approach demonstrates superior quality in tracking and reconstruction compared to approaches with discrete-time or global shutter assumptions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a dense continuous-time tracking and mapping method for RGB-D cameras. We parametrize the camera trajectory using continuous B-splines and optimize the trajectory through dense, direct image alignment. Our method also directly models rolling shutter in both RGB and depth images within the optimization, which improves tracking and reconstruction quality for low-cost CMOS sensors. Using a continuous trajectory representation has a number of advantages over a discrete-time representation (e.g. camera poses at the frame interval). With splines, less variables need to be optimized than with a discrete representation, since the trajectory can be represented with fewer control points than frames. Splines also naturally include smoothness constraints on derivatives of the trajectory estimate. Finally, the continuous trajectory representation allows to compensate for rolling shutter effects, since a pose estimate is available at any exposure time of an image. Our approach demonstrates superior quality in tracking and reconstruction compared to approaches with discrete-time or global shutter assumptions.",
"fno": "8391c264",
"keywords": [
"Cameras",
"Trajectory",
"Splines Mathematics",
"Simultaneous Localization And Mapping",
"Three Dimensional Displays",
"Image Reconstruction"
],
"authors": [
{
"affiliation": null,
"fullName": "Christian Kerl",
"givenName": "Christian",
"surname": "Kerl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jörg Stückler",
"givenName": "Jörg",
"surname": "Stückler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Daniel Cremers",
"givenName": "Daniel",
"surname": "Cremers",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "2264-2272",
"year": "2015",
"issn": "2380-7504",
"isbn": "978-1-4673-8391-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8391c255",
"articleId": "12OmNzUgcXm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8391c273",
"articleId": "12OmNvqmUEa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2015/6964/0/07298760",
"title": "Rolling shutter motion deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298760/12OmNAkWvdx",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/181P2A31",
"title": "Rolling shutter bundle adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/181P2A31/12OmNAsk4zp",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d337",
"title": "Sparse to Dense 3D Reconstruction from Rolling Shutter Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d337/12OmNvAAtiF",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbrlarsrobocontrol/2014/6711/0/07024256",
"title": "A Fast Visual Odometry and Mapping System for RGB-D Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/sbrlarsrobocontrol/2014/07024256/12OmNylboJA",
"parentPublication": {
"id": "proceedings/sbrlarsrobocontrol/2014/6711/0",
"title": "2014 Joint Conference on Robotics: SBR-LARS Robotics Symposium and Robocontrol (SBR LARS Robocontrol)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/micai/2014/7010/0/07222865",
"title": "A Topological SPLAM Approach for Robust Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/micai/2014/07222865/12OmNzmclyI",
"parentPublication": {
"id": "proceedings/micai/2014/7010/0",
"title": "2014 13th Mexican International Conference on Artificial Intelligence (MICAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523411",
"title": "Towards Kilo-Hertz 6-DoF Visual Tracking Using an Egocentric Cluster of Rolling Shutter Cameras",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523411/13rRUwjXZSi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/04/08325527",
"title": "Accurate 3D Reconstruction from Small Motion Clip for Rolling Shutter Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2019/04/08325527/13rRUxlgxXO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09926197",
"title": "Rolling Shutter Inversion: Bring Rolling Shutter Images to High Framerate Global Shutter Video",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09926197/1HGJ3Pb5VzW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300a134",
"title": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300a134/1gyr8GIX9E4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a978",
"title": "HyperSLAM: A Generic and Modular Approach to Sensor Fusion and Simultaneous Localization And Mapping in Continuous-Time",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a978/1qyxncPHqBa",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNz2TCuR",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwI8cdI",
"doi": "10.1109/CVPR.2013.179",
"title": "Rolling Shutter Camera Calibration",
"normalizedTitle": "Rolling Shutter Camera Calibration",
"abstract": "Rolling Shutter (RS) cameras are used across a wide range of consumer electronic devices-from smart-phones to high-end cameras. It is well known, that if a RS camera is used with a moving camera or scene, significant image distortions are introduced. The quality or even success of structure from motion on rolling shutter images requires the usual intrinsic parameters such as focal length and distortion coefficients as well as accurate modelling of the shutter timing. The current state-of-the-art technique for calibrating the shutter timings requires specialised hardware. We present a new method that only requires video of a known calibration pattern. Experimental results on over 60 real datasets show that our method is more accurate than the current state of the art.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Rolling Shutter (RS) cameras are used across a wide range of consumer electronic devices-from smart-phones to high-end cameras. It is well known, that if a RS camera is used with a moving camera or scene, significant image distortions are introduced. The quality or even success of structure from motion on rolling shutter images requires the usual intrinsic parameters such as focal length and distortion coefficients as well as accurate modelling of the shutter timing. The current state-of-the-art technique for calibrating the shutter timings requires specialised hardware. We present a new method that only requires video of a known calibration pattern. Experimental results on over 60 real datasets show that our method is more accurate than the current state of the art.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Rolling Shutter (RS) cameras are used across a wide range of consumer electronic devices-from smart-phones to high-end cameras. It is well known, that if a RS camera is used with a moving camera or scene, significant image distortions are introduced. The quality or even success of structure from motion on rolling shutter images requires the usual intrinsic parameters such as focal length and distortion coefficients as well as accurate modelling of the shutter timing. The current state-of-the-art technique for calibrating the shutter timings requires specialised hardware. We present a new method that only requires video of a known calibration pattern. Experimental results on over 60 real datasets show that our method is more accurate than the current state of the art.",
"fno": "4989b360",
"keywords": [
"Continuous Time Estimation",
"Rolling Shutter Camera",
"Calibration"
],
"authors": [
{
"affiliation": null,
"fullName": "Luc Oth",
"givenName": "Luc",
"surname": "Oth",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Paul Furgale",
"givenName": "Paul",
"surname": "Furgale",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Laurent Kneip",
"givenName": "Laurent",
"surname": "Kneip",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Roland Siegwart",
"givenName": "Roland",
"surname": "Siegwart",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1360-1367",
"year": "2013",
"issn": "1063-6919",
"isbn": "978-0-7695-4989-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4989b352",
"articleId": "12OmNC2fGAR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4989b368",
"articleId": "12OmNvqmUGe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2012/1226/0/181P2A31",
"title": "Rolling shutter bundle adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/181P2A31/12OmNAsk4zp",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/07780817",
"title": "Rolling Shutter Camera Relative Pose: Generalized Epipolar Geometry",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/07780817/12OmNBKW9vz",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840a465",
"title": "Rolling Shutter Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a465/12OmNqFrGvu",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2012/1662/0/06215213",
"title": "Calibration-free rolling shutter removal",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2012/06215213/12OmNyRg4Ak",
"parentPublication": {
"id": "proceedings/iccp/2012/1662/0",
"title": "2012 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a636",
"title": "Occlusion-Aware Rolling Shutter Rectification of 3D Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a636/17D45W9KVJx",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/06/08621045",
"title": "Rolling Shutter Camera Absolute Pose",
"doi": null,
"abstractUrl": "/journal/tp/2020/06/08621045/17D45XERmmI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/2.812E213",
"title": "Inverting a Rolling Shutter Camera: Bring Rolling Shutter Images to High Framerate Global Shutter Video",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/2.812E213/1BmHTvnAJvq",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09926197",
"title": "Rolling Shutter Inversion: Bring Rolling Shutter Images to High Framerate Global Shutter Video",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09926197/1HGJ3Pb5VzW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600e935",
"title": "Joint Video Rolling Shutter Correction and Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600e935/1L6LzmzV9cc",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300e546",
"title": "Learning Structure-And-Motion-Aware Rolling Shutter Correction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300e546/1gyr9GVnqOA",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzayNEL",
"title": "2012 IEEE International Conference on Computational Photography (ICCP)",
"acronym": "iccp",
"groupId": "1800125",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyRg4Ak",
"doi": "10.1109/ICCPhot.2012.6215213",
"title": "Calibration-free rolling shutter removal",
"normalizedTitle": "Calibration-free rolling shutter removal",
"abstract": "We present a novel algorithm for efficient removal of rolling shutter distortions in uncalibrated streaming videos. Our proposed method is calibration free as it does not need any knowledge of the camera used, nor does it require calibration using specially recorded calibration sequences. Our algorithm can perform rolling shutter removal under varying focal lengths, as in videos from CMOS cameras equipped with an optical zoom. We evaluate our approach across a broad range of cameras and video sequences demonstrating robustness, scaleability, and repeatability. We also conducted a user study, which demonstrates preference for the output of our algorithm over other state-of-the art methods. Our algorithm is computationally efficient, easy to parallelize, and robust to challenging artifacts introduced by various cameras with differing technologies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel algorithm for efficient removal of rolling shutter distortions in uncalibrated streaming videos. Our proposed method is calibration free as it does not need any knowledge of the camera used, nor does it require calibration using specially recorded calibration sequences. Our algorithm can perform rolling shutter removal under varying focal lengths, as in videos from CMOS cameras equipped with an optical zoom. We evaluate our approach across a broad range of cameras and video sequences demonstrating robustness, scaleability, and repeatability. We also conducted a user study, which demonstrates preference for the output of our algorithm over other state-of-the art methods. Our algorithm is computationally efficient, easy to parallelize, and robust to challenging artifacts introduced by various cameras with differing technologies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel algorithm for efficient removal of rolling shutter distortions in uncalibrated streaming videos. Our proposed method is calibration free as it does not need any knowledge of the camera used, nor does it require calibration using specially recorded calibration sequences. Our algorithm can perform rolling shutter removal under varying focal lengths, as in videos from CMOS cameras equipped with an optical zoom. We evaluate our approach across a broad range of cameras and video sequences demonstrating robustness, scaleability, and repeatability. We also conducted a user study, which demonstrates preference for the output of our algorithm over other state-of-the art methods. Our algorithm is computationally efficient, easy to parallelize, and robust to challenging artifacts introduced by various cameras with differing technologies.",
"fno": "06215213",
"keywords": [
"CMOS Image Sensors",
"Distortion",
"Image Sequences",
"Video Cameras",
"Video Streaming",
"Calibration Free Rolling Shutter Removal",
"Rolling Shutter Distortions",
"Uncalibrated Streaming Videos",
"Specially Recorded Calibration Sequences",
"Focal Lengths",
"CMOS Cameras",
"Optical Zoom",
"Robustness",
"Repeatability",
"Scaleability",
"Abstracts",
"Tin",
"Equations",
"Robustness"
],
"authors": [
{
"affiliation": "Google Research, Mountain View, CA, USA",
"fullName": "Matthias Grundmann",
"givenName": "Matthias",
"surname": "Grundmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google Research, Mountain View, CA, USA",
"fullName": "Vivek Kwatra",
"givenName": "Vivek",
"surname": "Kwatra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Georgia Institute of Technology, Atlanta, USA",
"fullName": "Daniel Castro",
"givenName": "Daniel",
"surname": "Castro",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google Research, Mountain View, CA, USA",
"fullName": "Irfan Essa",
"givenName": "Irfan",
"surname": "Essa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-04-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-1662-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06215212",
"articleId": "12OmNx965xo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06215214",
"articleId": "12OmNyrIazV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2011/0063/0/06130217",
"title": "Structure and motion estimation from rolling shutter video",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130217/12OmNAle6Hg",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2017/2628/0/2628a188",
"title": "Affine Motion Model Based Rolling Shutter Removal Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2017/2628a188/12OmNBCqbGw",
"parentPublication": {
"id": "proceedings/icmcce/2017/2628/0",
"title": "2017 Second International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/07780817",
"title": "Rolling Shutter Camera Relative Pose: Generalized Epipolar Geometry",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/07780817/12OmNBKW9vz",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a882",
"title": "Rolling Shutter Correction in Manhattan World",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a882/12OmNCfSqHP",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840a465",
"title": "Rolling Shutter Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a465/12OmNqFrGvu",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989b360",
"title": "Rolling Shutter Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989b360/12OmNwI8cdI",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459408",
"title": "Structure and kinematics triangulation with a rolling shutter stereo rig",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459408/12OmNzBwGtZ",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/06/08621045",
"title": "Rolling Shutter Camera Absolute Pose",
"doi": null,
"abstractUrl": "/journal/tp/2020/06/08621045/17D45XERmmI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09926197",
"title": "Rolling Shutter Inversion: Bring Rolling Shutter Images to High Framerate Global Shutter Video",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09926197/1HGJ3Pb5VzW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c502",
"title": "From Two Rolling Shutters to One Global Shutter",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c502/1m3nX4WDZss",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WODaoT",
"doi": "10.1109/CVPR.2018.00507",
"title": "Rolling Shutter and Radial Distortion are Features for High Frame Rate Multi-camera Tracking",
"normalizedTitle": "Rolling Shutter and Radial Distortion are Features for High Frame Rate Multi-camera Tracking",
"abstract": "Traditionally, camera-based tracking approaches have treated rolling shutter and radial distortion as imaging artifacts that have to be overcome and corrected for in order to apply standard camera models and scene reconstruction methods. In this paper, we introduce a novel multi-camera tracking approach that for the first time jointly leverages the information introduced by rolling shutter and radial distortion as a feature to achieve superior performance with respect to high-frequency camera pose estimation. In particular, our system is capable of attaining high tracking rates that were previously unachievable. Our approach explicitly leverages rolling shutter capture and radial distortion to process individual rows, rather than entire image frames, for accurate camera motion estimation. We estimate a per-row 6 DoF pose of a rolling shutter camera by tracking multiple points on a radially distorted row whose rays span a curved surface in 3D space. Although tracking systems for rolling shutter cameras exist, we are the first to leverage radial distortion to measure a per-row pose - enabling us to use less than half the number of cameras required by the previous state of the art. We validate our system on both synthetic and real imagery.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Traditionally, camera-based tracking approaches have treated rolling shutter and radial distortion as imaging artifacts that have to be overcome and corrected for in order to apply standard camera models and scene reconstruction methods. In this paper, we introduce a novel multi-camera tracking approach that for the first time jointly leverages the information introduced by rolling shutter and radial distortion as a feature to achieve superior performance with respect to high-frequency camera pose estimation. In particular, our system is capable of attaining high tracking rates that were previously unachievable. Our approach explicitly leverages rolling shutter capture and radial distortion to process individual rows, rather than entire image frames, for accurate camera motion estimation. We estimate a per-row 6 DoF pose of a rolling shutter camera by tracking multiple points on a radially distorted row whose rays span a curved surface in 3D space. Although tracking systems for rolling shutter cameras exist, we are the first to leverage radial distortion to measure a per-row pose - enabling us to use less than half the number of cameras required by the previous state of the art. We validate our system on both synthetic and real imagery.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Traditionally, camera-based tracking approaches have treated rolling shutter and radial distortion as imaging artifacts that have to be overcome and corrected for in order to apply standard camera models and scene reconstruction methods. In this paper, we introduce a novel multi-camera tracking approach that for the first time jointly leverages the information introduced by rolling shutter and radial distortion as a feature to achieve superior performance with respect to high-frequency camera pose estimation. In particular, our system is capable of attaining high tracking rates that were previously unachievable. Our approach explicitly leverages rolling shutter capture and radial distortion to process individual rows, rather than entire image frames, for accurate camera motion estimation. We estimate a per-row 6 DoF pose of a rolling shutter camera by tracking multiple points on a radially distorted row whose rays span a curved surface in 3D space. Although tracking systems for rolling shutter cameras exist, we are the first to leverage radial distortion to measure a per-row pose - enabling us to use less than half the number of cameras required by the previous state of the art. We validate our system on both synthetic and real imagery.",
"fno": "642000e824",
"keywords": [
"Cameras",
"Motion Estimation",
"Object Tracking",
"Pose Estimation",
"Position Measurement",
"Camera Motion Estimation",
"High Frame Rate Multicamera Tracking Approach",
"Scene Reconstruction Methods",
"High Frequency Camera Pose Estimation",
"Per Row 6 Do F Pose Estimation",
"Leverage Radial Distortion",
"Radially Distorted Row",
"Rolling Shutter Camera",
"High Frequency Camera",
"Camera Based Tracking Approaches",
"Cameras",
"Distortion",
"Tracking",
"Three Dimensional Displays",
"Computer Vision",
"Lenses"
],
"authors": [
{
"affiliation": null,
"fullName": "Akash Bapat",
"givenName": "Akash",
"surname": "Bapat",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "True Price",
"givenName": "True",
"surname": "Price",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jan-Michael Frahm",
"givenName": "Jan-Michael",
"surname": "Frahm",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4824-4833",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000e814",
"articleId": "17D45XzbnKV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000e834",
"articleId": "17D45Xtvp9a",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2011/0063/0/06130217",
"title": "Structure and motion estimation from rolling shutter video",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130217/12OmNAle6Hg",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a948",
"title": "Rolling-Shutter-Aware Differential SfM and Image Rectification",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a948/12OmNC2OSNC",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a882",
"title": "Rolling Shutter Correction in Manhattan World",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a882/12OmNCfSqHP",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840a465",
"title": "Rolling Shutter Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a465/12OmNqFrGvu",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457e512",
"title": "Self-Calibration-Based Approach to Critical Motion Sequences of Rolling-Shutter Structure from Motion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457e512/12OmNzXWZKh",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/10/07748513",
"title": "Image Registration and Change Detection under Rolling Shutter Motion Blur",
"doi": null,
"abstractUrl": "/journal/tp/2017/10/07748513/13rRUynHukw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/06/08621045",
"title": "Rolling Shutter Camera Absolute Pose",
"doi": null,
"abstractUrl": "/journal/tp/2020/06/08621045/17D45XERmmI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200e521",
"title": "SUNet: Symmetric Undistortion Network for Rolling Shutter Correction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200e521/1BmL5Z4I1wY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300b062",
"title": "Revisiting Radial Distortion Absolute Pose",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300b062/1hVlRpT15wA",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c502",
"title": "From Two Rolling Shutters to One Global Shutter",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c502/1m3nX4WDZss",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3nX4WDZss",
"doi": "10.1109/CVPR42600.2020.00258",
"title": "From Two Rolling Shutters to One Global Shutter",
"normalizedTitle": "From Two Rolling Shutters to One Global Shutter",
"abstract": "Most consumer cameras are equipped with electronic rolling shutter, leading to image distortions when the camera moves during image capture. We explore a surprisingly simple camera configuration that makes it possible to undo the rolling shutter distortion: two cameras mounted to have different rolling shutter directions. Such a setup is easy and cheap to build and it possesses the geometric constraints needed to correct rolling shutter distortion using only a sparse set of point correspondences between the two images. We derive equations that describe the underlying geometry for general and special motions and present an efficient method for finding their solutions. Our synthetic and real experiments demonstrate that our approach is able to remove large rolling shutter distortions of all types without relying on any specific scene structure.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Most consumer cameras are equipped with electronic rolling shutter, leading to image distortions when the camera moves during image capture. We explore a surprisingly simple camera configuration that makes it possible to undo the rolling shutter distortion: two cameras mounted to have different rolling shutter directions. Such a setup is easy and cheap to build and it possesses the geometric constraints needed to correct rolling shutter distortion using only a sparse set of point correspondences between the two images. We derive equations that describe the underlying geometry for general and special motions and present an efficient method for finding their solutions. Our synthetic and real experiments demonstrate that our approach is able to remove large rolling shutter distortions of all types without relying on any specific scene structure.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Most consumer cameras are equipped with electronic rolling shutter, leading to image distortions when the camera moves during image capture. We explore a surprisingly simple camera configuration that makes it possible to undo the rolling shutter distortion: two cameras mounted to have different rolling shutter directions. Such a setup is easy and cheap to build and it possesses the geometric constraints needed to correct rolling shutter distortion using only a sparse set of point correspondences between the two images. We derive equations that describe the underlying geometry for general and special motions and present an efficient method for finding their solutions. Our synthetic and real experiments demonstrate that our approach is able to remove large rolling shutter distortions of all types without relying on any specific scene structure.",
"fno": "716800c502",
"keywords": [
"Cameras",
"Image Motion Analysis",
"Optical Distortion",
"Image Distortions",
"Electronic Rolling Shutter Distortion",
"Consumer Camera Configuration",
"Cameras",
"Distortion",
"Geometry",
"Smart Phones",
"Three Dimensional Displays",
"Image Capture",
"Image Reconstruction"
],
"authors": [
{
"affiliation": "ETH Zurich",
"fullName": "Cenek Albl",
"givenName": "Cenek",
"surname": "Albl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "VRG, FEE, CTU in Prague",
"fullName": "Zuzana Kukelova",
"givenName": "Zuzana",
"surname": "Kukelova",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zurich",
"fullName": "Viktor Larsson",
"givenName": "Viktor",
"surname": "Larsson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CIIRC, CTU in Prague",
"fullName": "Michal Polic",
"givenName": "Michal",
"surname": "Polic",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CIIRC, CTU in Prague",
"fullName": "Tomas Pajdla",
"givenName": "Tomas",
"surname": "Pajdla",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zurich",
"fullName": "Konrad Schindler",
"givenName": "Konrad",
"surname": "Schindler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2502-2510",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800c492",
"articleId": "1m3nPsEFWh2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800c511",
"articleId": "1m3o1HPZIli",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmcce/2017/2628/0/2628a188",
"title": "Affine Motion Model Based Rolling Shutter Removal Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2017/2628a188/12OmNBCqbGw",
"parentPublication": {
"id": "proceedings/icmcce/2017/2628/0",
"title": "2017 Second International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a882",
"title": "Rolling Shutter Correction in Manhattan World",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a882/12OmNCfSqHP",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840a465",
"title": "Rolling Shutter Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a465/12OmNqFrGvu",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989b360",
"title": "Rolling Shutter Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989b360/12OmNwI8cdI",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459408",
"title": "Structure and kinematics triangulation with a rolling shutter stereo rig",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459408/12OmNzBwGtZ",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a636",
"title": "Occlusion-Aware Rolling Shutter Rectification of 3D Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a636/17D45W9KVJx",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000e824",
"title": "Rolling Shutter and Radial Distortion are Features for High Frame Rate Multi-camera Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000e824/17D45WODaoT",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200e521",
"title": "SUNet: Symmetric Undistortion Network for Rolling Shutter Correction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200e521/1BmL5Z4I1wY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600r7773",
"title": "Neural Global Shutter: Learn to Restore Video from a Rolling Shutter Camera with Global Reset Feature",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600r7773/1H0KNkIimQw",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09926197",
"title": "Rolling Shutter Inversion: Bring Rolling Shutter Images to High Framerate Global Shutter Video",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09926197/1HGJ3Pb5VzW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrAdsuf",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC1oT64",
"doi": "10.1109/ISMAR.2015.14",
"title": "Simultaneous Direct and Augmented View Distortion Calibration of Optical See-Through Head-Mounted Displays",
"normalizedTitle": "Simultaneous Direct and Augmented View Distortion Calibration of Optical See-Through Head-Mounted Displays",
"abstract": "In Augmented Reality (AR) with an Optical See-Through Head-Mounted Display (OST-HMD), the spatial calibration between a user's eye and the display screen is a crucial issue in realizing seamless AR experiences. A successful calibration hinges upon proper modeling of the display system which is conceptually broken down into an eye part and an HMD part. This paper breaks the HMD part down even further to investigate optical aberration issues. The display optics causes two different optical aberrations that degrade the calibration quality: the distortion of incoming light from the physical world, and that of light from the image source of the HMD. While methods exist for correcting either of the two distortions independently, there is, to our knowledge, no method which corrects for both simultaneously. This paper proposes a calibration method that corrects both of the two distortions simultaneously for an arbitrary eye position given an OST-HMD system. We expand a light-field (LF) correction approach [8] originally designed for the former distortion. Our method is camera-based and has an offline learning and an online correction step. We verify our method in exemplary calibrations of two different OST-HMDs: a professional and a consumer OST-HMD. The results show that our method significantly improves the calibration quality compared to a conventional method with the accuracy comparable to 20/50 visual acuity. The results also indicate that only by correcting both the distortions simultaneously can improve the quality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In Augmented Reality (AR) with an Optical See-Through Head-Mounted Display (OST-HMD), the spatial calibration between a user's eye and the display screen is a crucial issue in realizing seamless AR experiences. A successful calibration hinges upon proper modeling of the display system which is conceptually broken down into an eye part and an HMD part. This paper breaks the HMD part down even further to investigate optical aberration issues. The display optics causes two different optical aberrations that degrade the calibration quality: the distortion of incoming light from the physical world, and that of light from the image source of the HMD. While methods exist for correcting either of the two distortions independently, there is, to our knowledge, no method which corrects for both simultaneously. This paper proposes a calibration method that corrects both of the two distortions simultaneously for an arbitrary eye position given an OST-HMD system. We expand a light-field (LF) correction approach [8] originally designed for the former distortion. Our method is camera-based and has an offline learning and an online correction step. We verify our method in exemplary calibrations of two different OST-HMDs: a professional and a consumer OST-HMD. The results show that our method significantly improves the calibration quality compared to a conventional method with the accuracy comparable to 20/50 visual acuity. The results also indicate that only by correcting both the distortions simultaneously can improve the quality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In Augmented Reality (AR) with an Optical See-Through Head-Mounted Display (OST-HMD), the spatial calibration between a user's eye and the display screen is a crucial issue in realizing seamless AR experiences. A successful calibration hinges upon proper modeling of the display system which is conceptually broken down into an eye part and an HMD part. This paper breaks the HMD part down even further to investigate optical aberration issues. The display optics causes two different optical aberrations that degrade the calibration quality: the distortion of incoming light from the physical world, and that of light from the image source of the HMD. While methods exist for correcting either of the two distortions independently, there is, to our knowledge, no method which corrects for both simultaneously. This paper proposes a calibration method that corrects both of the two distortions simultaneously for an arbitrary eye position given an OST-HMD system. We expand a light-field (LF) correction approach [8] originally designed for the former distortion. Our method is camera-based and has an offline learning and an online correction step. We verify our method in exemplary calibrations of two different OST-HMDs: a professional and a consumer OST-HMD. The results show that our method significantly improves the calibration quality compared to a conventional method with the accuracy comparable to 20/50 visual acuity. The results also indicate that only by correcting both the distortions simultaneously can improve the quality.",
"fno": "7660a043",
"keywords": [
"Optical Distortion",
"Distortion",
"Calibration",
"DVD",
"Cameras",
"Three Dimensional Displays",
"Optical Imaging"
],
"authors": [
{
"affiliation": null,
"fullName": "Yuta Itoh",
"givenName": "Yuta",
"surname": "Itoh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gudrun Klinker",
"givenName": "Gudrun",
"surname": "Klinker",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-09-01T00:00:00",
"pubType": "proceedings",
"pages": "43-48",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7660-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7660a037",
"articleId": "12OmNwqx4aS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7660a049",
"articleId": "12OmNvAiSE1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948513",
"title": "Google glass, The META and Co. How to calibrate optical see-through head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948513/12OmNB8TUim",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798846",
"title": "Interaction-free calibration for optical see-through head-mounted displays based on 3D Eye localization",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798846/12OmNCdBDWL",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223450",
"title": "Evaluating optical see-through head-mounted display calibration via frustum visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223450/12OmNrAv3Ap",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549353",
"title": "A robust camera-based method for optical distortion calibration of head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549353/12OmNwvVrHy",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948424",
"title": "Performance and sensitivity analysis of INDICA: INteraction-Free DIsplay CAlibration for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948424/12OmNyYm2oO",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064856",
"title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523375",
"title": "Gaussian Light Field: Estimation of Viewpoint-Dependent Blur for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523375/13rRUxYINfi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a259",
"title": "OSTNet: Calibration Method for Optical See-Through Head-Mounted Displays via Non-Parametric Distortion Map Generation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a259/1gysj1o4L16",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7WuL68jAY",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00084",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"normalizedTitle": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"abstract": "Optical see-through head-mounted displays (OST-HMDs) have been increasingly used in research and industrial applications as Augmented Reality (AR) support devices. However, problems still exist that prevent their use as general-purpose devices. One of these issues is the color blending problem between content and background colors. More specifically, the light from the background overlaps with the light from the OST-HMD and shifts the color of OST-HMD's light from its intended display intensity and color. Though color compensation methods exist, in order to properly compensate for light shifts, we need to know how the background color will affect the light that eventually hits the user's eye when combined with the OST-HMD image. In this paper, we study how background colors shift as a result of passing through the OST-HMD's optics in order to better inform the development of color compensation methods. We measured the background color objectively for the Magic Leap 1, the HoloLens (first gen), and the HoloLens 2 and evaluated results. We found that all three OST-HMDs shift background color to a perceptible degree, and that the degree of shift depends on the original background color.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Optical see-through head-mounted displays (OST-HMDs) have been increasingly used in research and industrial applications as Augmented Reality (AR) support devices. However, problems still exist that prevent their use as general-purpose devices. One of these issues is the color blending problem between content and background colors. More specifically, the light from the background overlaps with the light from the OST-HMD and shifts the color of OST-HMD's light from its intended display intensity and color. Though color compensation methods exist, in order to properly compensate for light shifts, we need to know how the background color will affect the light that eventually hits the user's eye when combined with the OST-HMD image. In this paper, we study how background colors shift as a result of passing through the OST-HMD's optics in order to better inform the development of color compensation methods. We measured the background color objectively for the Magic Leap 1, the HoloLens (first gen), and the HoloLens 2 and evaluated results. We found that all three OST-HMDs shift background color to a perceptible degree, and that the degree of shift depends on the original background color.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Optical see-through head-mounted displays (OST-HMDs) have been increasingly used in research and industrial applications as Augmented Reality (AR) support devices. However, problems still exist that prevent their use as general-purpose devices. One of these issues is the color blending problem between content and background colors. More specifically, the light from the background overlaps with the light from the OST-HMD and shifts the color of OST-HMD's light from its intended display intensity and color. Though color compensation methods exist, in order to properly compensate for light shifts, we need to know how the background color will affect the light that eventually hits the user's eye when combined with the OST-HMD image. In this paper, we study how background colors shift as a result of passing through the OST-HMD's optics in order to better inform the development of color compensation methods. We measured the background color objectively for the Magic Leap 1, the HoloLens (first gen), and the HoloLens 2 and evaluated results. We found that all three OST-HMDs shift background color to a perceptible degree, and that the degree of shift depends on the original background color.",
"fno": "536500a389",
"keywords": [
"Augmented Reality",
"Helmet Mounted Displays",
"Image Colour Analysis",
"Augmented Reality Support Devices",
"Background Color Shifts",
"Background Colors Shift",
"Color Blending Problem",
"Color Compensation Methods",
"General Purpose Devices",
"Head Mounted Displays",
"Industrial Applications",
"Intended Display Intensity",
"Light Shifts",
"Objective Measurements",
"Original Background Color",
"OST HMD Image",
"OST HM Ds Optics",
"OST HM Ds Shift Background Color",
"Head Mounted Displays",
"Image Color Analysis",
"Optical Variables Measurement",
"Optical Imaging",
"Augmented Reality",
"Optical Devices"
],
"authors": [
{
"affiliation": "Osaka University,Japan",
"fullName": "Daichi Hirobe",
"givenName": "Daichi",
"surname": "Hirobe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University,Japan",
"fullName": "Yuki Uranishi",
"givenName": "Yuki",
"surname": "Uranishi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "United States Osaka University,Augusta University,Japan",
"fullName": "Jason Orlosky",
"givenName": "Jason",
"surname": "Orlosky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University,Japan",
"fullName": "Shizuka Shirai",
"givenName": "Shizuka",
"surname": "Shirai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka Institute of Technology,Japan",
"fullName": "Photchara Ratsamee",
"givenName": "Photchara",
"surname": "Ratsamee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University,Japan",
"fullName": "Haruo Takemura",
"givenName": "Haruo",
"surname": "Takemura",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "389-390",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a384",
"articleId": "1J7WvMEzRgA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a391",
"articleId": "1J7WwXrPVqo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948513",
"title": "Google glass, The META and Co. How to calibrate optical see-through head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948513/12OmNB8TUim",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a043",
"title": "Simultaneous Direct and Augmented View Distortion Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a043/12OmNC1oT64",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a202",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a202/12OmNqI04YU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064856",
"title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08456571",
"title": "Restoring the Awareness in the Occluded Visual Field for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08456571/14M3DYLGFgs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a640",
"title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a649",
"title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a649/1pysvKFdazS",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwIYZzz",
"title": "Fourth IEEE International Conference on Computer Vision Systems",
"acronym": "icvs",
"groupId": "1002064",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqNG3fj",
"doi": "10.1109/ICVS.2006.2",
"title": "A Desktop 3D Scanner Exploiting Rotation and Visual Rectification of Laser Profiles",
"normalizedTitle": "A Desktop 3D Scanner Exploiting Rotation and Visual Rectification of Laser Profiles",
"abstract": "We describe a low cost system for metric 3D scanning from uncalibrated images based on rotational kinematic constraints. The system is composed by a turntable, an offthe- shelf camera and a laser stripe illuminator. System operation is based on the construction of the virtual image of a surface of revolution (SOR), from which two imaged SOR cross-sections are obtained in an automatic way, and internal camera calibration is performed by exploiting the same object being scanned. Shape acquisition is finally obtained by laser profile rectification and collation. Experiments with real data are shown, providing an insight into both camera calibration and shape reconstruction performance. System accuracy appears to be adequate for desktop applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We describe a low cost system for metric 3D scanning from uncalibrated images based on rotational kinematic constraints. The system is composed by a turntable, an offthe- shelf camera and a laser stripe illuminator. System operation is based on the construction of the virtual image of a surface of revolution (SOR), from which two imaged SOR cross-sections are obtained in an automatic way, and internal camera calibration is performed by exploiting the same object being scanned. Shape acquisition is finally obtained by laser profile rectification and collation. Experiments with real data are shown, providing an insight into both camera calibration and shape reconstruction performance. System accuracy appears to be adequate for desktop applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We describe a low cost system for metric 3D scanning from uncalibrated images based on rotational kinematic constraints. The system is composed by a turntable, an offthe- shelf camera and a laser stripe illuminator. System operation is based on the construction of the virtual image of a surface of revolution (SOR), from which two imaged SOR cross-sections are obtained in an automatic way, and internal camera calibration is performed by exploiting the same object being scanned. Shape acquisition is finally obtained by laser profile rectification and collation. Experiments with real data are shown, providing an insight into both camera calibration and shape reconstruction performance. System accuracy appears to be adequate for desktop applications.",
"fno": "01578737",
"keywords": [
"Cameras",
"Calibration",
"Image Reconstruction",
"Costs",
"Shape",
"Layout",
"Laser Noise",
"Laser Modes",
"Computer Vision",
"Image Segmentation"
],
"authors": [],
"idPrefix": "icvs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-08-01T00:00:00",
"pubType": "proceedings",
"pages": "49-54",
"year": "2006",
"issn": null,
"isbn": "0-7695-2506-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "01578726",
"articleId": "12OmNx4gUwG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2009/3583/2/3583b479",
"title": "A PSO-Based Ball-Plate Calibration for Laser Scanner",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583b479/12OmNAPBbg3",
"parentPublication": {
"id": "proceedings/icmtma/2009/3583/2",
"title": "2009 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2009/3605/1/3605a781",
"title": "The Calibration Algorithm Between 2D Laser Range Finder and Platform",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2009/3605a781/12OmNB8kHX6",
"parentPublication": {
"id": "cso/2009/3605/1",
"title": "2009 International Joint Conference on Computational Sciences and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/2007/2939/0/29390320",
"title": "Modeling and Calibration of Coupled Fish-Eye CCD Camera and Laser Range Scanner for Outdoor Environment Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/2007/29390320/12OmNBlofM6",
"parentPublication": {
"id": "proceedings/3dim/2007/2939/0",
"title": "2007 6th International Conference on 3-D Digital Imaging and Modeling",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2008/3494/1/04732158",
"title": "Camera Calibration Algorithm Based on Single View of Three Surfaces of Revolution",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2008/04732158/12OmNBr4eHA",
"parentPublication": {
"id": "proceedings/isise/2008/3494/1",
"title": "2008 International Symposium on Information Science and Engieering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/is3c/2016/3071/0/3071a850",
"title": "A Laser-Based Vision System for Tire Tread Depth Inspection",
"doi": null,
"abstractUrl": "/proceedings-article/is3c/2016/3071a850/12OmNC8MsAk",
"parentPublication": {
"id": "proceedings/is3c/2016/3071/0",
"title": "2016 International Symposium on Computer, Consumer and Control (IS3C)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031c933",
"title": "Research on Error Analysis and Calibration Method of Laser Scan Range Finder",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031c933/12OmNs59JHx",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223223",
"title": "From accurate range imaging sensor calibration to accurate model-based 3D object localization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223223/12OmNxu6p9W",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671799",
"title": "In-situ interactive modeling using a single-point laser rangefinder coupled with a new hybrid orientation tracker",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671799/12OmNz61dzi",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2005/01/i0099",
"title": "Metric 3D Reconstruction and Texture Acquisition of Surfaces of Revolution from a Single Uncalibrated View",
"doi": null,
"abstractUrl": "/journal/tp/2005/01/i0099/13rRUwInvm2",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a168",
"title": "Estimating Relative Pose between Nonoverlapping Cameras by Four Laser Pointers Based on General Camera Model",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a168/17D45WgziSl",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxFJXDg",
"title": "2010 Second World Congress on Software Engineering",
"acronym": "wcse",
"groupId": "1002945",
"volume": "2",
"displayVolume": "2",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvnOwuA",
"doi": "10.1109/WCSE.2010.157",
"title": "Theoretical Research on Adjacent Multi-station Point Cloud Data Precision Combination in 3D Laser Scanning",
"normalizedTitle": "Theoretical Research on Adjacent Multi-station Point Cloud Data Precision Combination in 3D Laser Scanning",
"abstract": "Adjacent multi-station rotational scanning the target band with the 3D laser scanner ( Leica ScanStation II ) .acquires the 3D point cloud models of every rotational position. Adjacent multi-station point seriate cloud models have been registered, acquires the correspondent points. Then combines point cloud models with the theory of single strip aerial triangulation, acquires coordinates of cloud models in the same coordinate system, nonlinear correction to the model, excludes the cumulative errors caused by model combination, completes a high precision splicing of more stations continuous strip of point cloud model.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Adjacent multi-station rotational scanning the target band with the 3D laser scanner ( Leica ScanStation II ) .acquires the 3D point cloud models of every rotational position. Adjacent multi-station point seriate cloud models have been registered, acquires the correspondent points. Then combines point cloud models with the theory of single strip aerial triangulation, acquires coordinates of cloud models in the same coordinate system, nonlinear correction to the model, excludes the cumulative errors caused by model combination, completes a high precision splicing of more stations continuous strip of point cloud model.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Adjacent multi-station rotational scanning the target band with the 3D laser scanner ( Leica ScanStation II ) .acquires the 3D point cloud models of every rotational position. Adjacent multi-station point seriate cloud models have been registered, acquires the correspondent points. Then combines point cloud models with the theory of single strip aerial triangulation, acquires coordinates of cloud models in the same coordinate system, nonlinear correction to the model, excludes the cumulative errors caused by model combination, completes a high precision splicing of more stations continuous strip of point cloud model.",
"fno": "05718400",
"keywords": [
"Image Registration",
"Object Detection",
"Optical Scanners",
"Adjacent Multi Station Point Cloud Data Precision Combination",
"3 D Laser Scanning",
"Rotational Position",
"Single Strip Aerial Triangulation",
"Nonlinear Correction",
"Clouds",
"Mathematical Model",
"Three Dimensional Displays",
"Data Models",
"Monitoring",
"Strips",
"Laser Modes",
"Registration",
"Combination",
"Aerial Triangulation",
"Nonlinear Correction"
],
"authors": [
{
"affiliation": null,
"fullName": "Tianzi Li",
"givenName": "Tianzi",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Youfeng Zou",
"givenName": "Youfeng",
"surname": "Zou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wcse",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-12-01T00:00:00",
"pubType": "proceedings",
"pages": "315-317",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-9287-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4303b311",
"articleId": "12OmNqIzh9z",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4303b318",
"articleId": "12OmNCd2ryd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/geows/2009/3527/0/04782686",
"title": "Recording and Modeling Paleolithic Caves through Laser Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/geows/2009/04782686/12OmNBIFmuQ",
"parentPublication": {
"id": "proceedings/geows/2009/3527/0",
"title": "Advanced Geographic Information Systems & Web Services, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/milcom/2002/7625/1/01180442",
"title": "Theoretical model for a cloudy-channel laser communications experiment",
"doi": null,
"abstractUrl": "/proceedings-article/milcom/2002/01180442/12OmNBdJ5gT",
"parentPublication": {
"id": "proceedings/milcom/2002/7625/2",
"title": "Military Communications Conference (MILCOM 2002)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a652",
"title": "Comprehensive Automated 3D Urban Environment Modelling Using Terrestrial Laser Scanning Point Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a652/12OmNwHhoNx",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2016/2305/0/2305a112",
"title": "Processing of Three-Dimensional Models for the Crystal Laser Engraving",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2016/2305a112/12OmNxQOjHn",
"parentPublication": {
"id": "proceedings/nicoint/2016/2305/0",
"title": "2016 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2016/4840/0/4840a466",
"title": "The Detecting Method of Building Deformation Based on Terrestrial Laser Point Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2016/4840a466/12OmNxRF72t",
"parentPublication": {
"id": "proceedings/cis/2016/4840/0",
"title": "2016 12th International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kam/2009/3888/3/3888c206",
"title": "Research on Three-Dimensional Point Clouds Processing for Standing Tree Volume Based on Laser Scanner",
"doi": null,
"abstractUrl": "/proceedings-article/kam/2009/3888c206/12OmNyFCvV2",
"parentPublication": {
"id": "proceedings/kam/2009/3888/1",
"title": "Knowledge Acquisition and Modeling, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523389",
"title": "Spatio-Temporal Point Path Analysis and Optimization of a Galvanoscopic Scanning Laser Projector",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523389/13rRUxBJhvz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2021/0770/0/077000a248",
"title": "Slope Deformation Analysis and Research Based on 3D Laser Scanning Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a248/1APq91z67bq",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmiae/2022/7396/0/739600a174",
"title": "Application research of 3D laser scanning technology in hull rib assembly",
"doi": null,
"abstractUrl": "/proceedings-article/icmiae/2022/739600a174/1JgrH02LtDO",
"parentPublication": {
"id": "proceedings/icmiae/2022/7396/0",
"title": "2022 International Conference on Manufacturing, Industrial Automation and Electronics (ICMIAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itca/2020/0378/0/037800a303",
"title": "Rapid elimination of noise in 3D laser scanning point cloud data",
"doi": null,
"abstractUrl": "/proceedings-article/itca/2020/037800a303/1tpBgAzfU1G",
"parentPublication": {
"id": "proceedings/itca/2020/0378/0",
"title": "2020 2nd International Conference on Information Technology and Computer Application (ITCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNylborH",
"title": "Proceedings of the 2000 IEEE International Conference on Control Applications",
"acronym": "cca",
"groupId": "1000166",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNweBUDb",
"doi": "10.1109/CCA.2000.897483",
"title": "Development of a continuous scanning laser Doppler vibrometer for vibration mode shape analysis",
"normalizedTitle": "Development of a continuous scanning laser Doppler vibrometer for vibration mode shape analysis",
"abstract": "Addresses the vibration mode shape measurement technique utilizing a continuous scanning laser Doppler vibrometer (SLDV). The scanning capability is added to the conventional discrete LDV by reflecting the laser beams on the surface of the object using a harmonically oscillating mirror which is driven by an electromagnetic resonant scanning actuator. The precise and sufficient sweep angle is obtained without using a position controller. If a continuous sinusoidal scanning method is used, it can be shown that the velocity output signal from the SLDV is modulated to give the spatial velocity distribution in terms of coefficients which are obtained from the Fourier transformation of the time dependent velocity signal. Using the Chebyshev series form, the analysis of the vibration mode shape techniques for straight line scanning and 2D area scanning are presented and discussed. The performance of the proposed SLDV is presented using the experimental results of the vibration mode shape of a cantilever beam.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Addresses the vibration mode shape measurement technique utilizing a continuous scanning laser Doppler vibrometer (SLDV). The scanning capability is added to the conventional discrete LDV by reflecting the laser beams on the surface of the object using a harmonically oscillating mirror which is driven by an electromagnetic resonant scanning actuator. The precise and sufficient sweep angle is obtained without using a position controller. If a continuous sinusoidal scanning method is used, it can be shown that the velocity output signal from the SLDV is modulated to give the spatial velocity distribution in terms of coefficients which are obtained from the Fourier transformation of the time dependent velocity signal. Using the Chebyshev series form, the analysis of the vibration mode shape techniques for straight line scanning and 2D area scanning are presented and discussed. The performance of the proposed SLDV is presented using the experimental results of the vibration mode shape of a cantilever beam.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Addresses the vibration mode shape measurement technique utilizing a continuous scanning laser Doppler vibrometer (SLDV). The scanning capability is added to the conventional discrete LDV by reflecting the laser beams on the surface of the object using a harmonically oscillating mirror which is driven by an electromagnetic resonant scanning actuator. The precise and sufficient sweep angle is obtained without using a position controller. If a continuous sinusoidal scanning method is used, it can be shown that the velocity output signal from the SLDV is modulated to give the spatial velocity distribution in terms of coefficients which are obtained from the Fourier transformation of the time dependent velocity signal. Using the Chebyshev series form, the analysis of the vibration mode shape techniques for straight line scanning and 2D area scanning are presented and discussed. The performance of the proposed SLDV is presented using the experimental results of the vibration mode shape of a cantilever beam.",
"fno": "00897483",
"keywords": [
"Shape Measurement",
"Measurement By Laser Beam",
"Vibration Measurement",
"Continuous Scanning Laser Doppler Vibrometer",
"Vibration Mode Shape Analysis",
"Harmonically Oscillating Mirror",
"Electromagnetic Resonant Scanning Actuator",
"Continuous Sinusoidal Scanning Method",
"Spatial Velocity Distribution",
"Time Dependent Velocity Signal",
"Chebyshev Series Form",
"Straight Line Scanning",
"2 D Area Scanning",
"Cantilever Beam",
"Vibrometers",
"Laser Modes",
"Vibration Measurement",
"Shape Measurement",
"Laser Beams",
"Surface Emitting Lasers",
"Mirrors",
"Resonance",
"Actuators",
"Chebyshev Approximation"
],
"authors": [
{
"affiliation": "Dept. of Mech., Kwangju Inst. of Sci. & Technol., South Korea",
"fullName": "Kyuhwan Park",
"givenName": null,
"surname": "Kyuhwan Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Seonjae Kim",
"givenName": null,
"surname": "Seonjae Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sangyol Yoon",
"givenName": null,
"surname": "Sangyol Yoon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jekil Ryu",
"givenName": null,
"surname": "Jekil Ryu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cca",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-01-01T00:00:00",
"pubType": "proceedings",
"pages": "554,555,556,557,558,559",
"year": "2000",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00897482",
"articleId": "12OmNB1eJFo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00897484",
"articleId": "12OmNBQkwX5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457c548",
"title": "Non-contact Full Field Vibration Measurement Based on Phase-Shifting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457c548/12OmNAoDinu",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccie/2010/4026/2/4026b367",
"title": "Modeling and Natural Frequency Characteristics of Coupled Vibration with Varying Length of Hoisting Rope in Drum Winding System",
"doi": null,
"abstractUrl": "/proceedings-article/ccie/2010/4026b367/12OmNAoUT5w",
"parentPublication": {
"id": "proceedings/ccie/2010/4026/2",
"title": "Computing, Control and Industrial Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/geows/2009/3527/0/04782686",
"title": "Recording and Modeling Paleolithic Caves through Laser Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/geows/2009/04782686/12OmNBIFmuQ",
"parentPublication": {
"id": "proceedings/geows/2009/3527/0",
"title": "Advanced Geographic Information Systems & Web Services, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iptc/2010/4196/0/4196a395",
"title": "Blasting Vibration Forecast Base on Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/iptc/2010/4196a395/12OmNvT2oUL",
"parentPublication": {
"id": "proceedings/iptc/2010/4196/0",
"title": "Intelligence Information Processing and Trusted Computing, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceet/2009/3819/2/3819b321",
"title": "Study on Vibration Modes of Large Type Vertical Motor",
"doi": null,
"abstractUrl": "/proceedings-article/iceet/2009/3819b321/12OmNy5R3A8",
"parentPublication": {
"id": "proceedings/iceet/2009/3819/2",
"title": "Energy and Environment Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2009/3605/1/3605a233",
"title": "Optimization Design of Plate on Vibration and Acoustics Based on Finite Element Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2009/3605a233/12OmNzVGcUh",
"parentPublication": {
"id": "cso/2009/3605/1",
"title": "2009 International Joint Conference on Computational Sciences and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523389",
"title": "Spatio-Temporal Point Path Analysis and Optimization of a Galvanoscopic Scanning Laser Projector",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523389/13rRUxBJhvz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2021/0770/0/077000a248",
"title": "Slope Deformation Analysis and Research Based on 3D Laser Scanning Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a248/1APq91z67bq",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/euros&p/2022/1614/0/161400a537",
"title": "Laser Meager Listener: A Scientific Exploration of Laser-based Speech Eavesdropping in Commercial User Space",
"doi": null,
"abstractUrl": "/proceedings-article/euros&p/2022/161400a537/1ErpCbbN8ti",
"parentPublication": {
"id": "proceedings/euros&p/2022/1614/0",
"title": "2022 IEEE 7th European Symposium on Security and Privacy (EuroS&P)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmiae/2022/7396/0/739600a174",
"title": "Application research of 3D laser scanning technology in hull rib assembly",
"doi": null,
"abstractUrl": "/proceedings-article/icmiae/2022/739600a174/1JgrH02LtDO",
"parentPublication": {
"id": "proceedings/icmiae/2022/7396/0",
"title": "2022 International Conference on Manufacturing, Industrial Automation and Electronics (ICMIAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKiry",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"acronym": "acpr",
"groupId": "1800942",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WgziSl",
"doi": "10.1109/ACPR.2017.79",
"title": "Estimating Relative Pose between Nonoverlapping Cameras by Four Laser Pointers Based on General Camera Model",
"normalizedTitle": "Estimating Relative Pose between Nonoverlapping Cameras by Four Laser Pointers Based on General Camera Model",
"abstract": "In this paper we propose a method of estimating relative pose between non-overlapping cameras by minimal four laser pointers based on a general camera model. In the proposed method, four laser pointers are mounted on a calibration chess board. We call this apparatus a laser calibration board, and model it as a general camera. First, the relative pose of each laser pointer at the coordinate system of the calibration chess board is calibrated. Then, the above calibrated laser calibration board is used to estimate the relative pose between two non-overlapping cameras by using a NPnP (Non-Perspective n Points) algorithm for a general camera. The experimental results are given to show the effectiveness of the proposed method also.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we propose a method of estimating relative pose between non-overlapping cameras by minimal four laser pointers based on a general camera model. In the proposed method, four laser pointers are mounted on a calibration chess board. We call this apparatus a laser calibration board, and model it as a general camera. First, the relative pose of each laser pointer at the coordinate system of the calibration chess board is calibrated. Then, the above calibrated laser calibration board is used to estimate the relative pose between two non-overlapping cameras by using a NPnP (Non-Perspective n Points) algorithm for a general camera. The experimental results are given to show the effectiveness of the proposed method also.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we propose a method of estimating relative pose between non-overlapping cameras by minimal four laser pointers based on a general camera model. In the proposed method, four laser pointers are mounted on a calibration chess board. We call this apparatus a laser calibration board, and model it as a general camera. First, the relative pose of each laser pointer at the coordinate system of the calibration chess board is calibrated. Then, the above calibrated laser calibration board is used to estimate the relative pose between two non-overlapping cameras by using a NPnP (Non-Perspective n Points) algorithm for a general camera. The experimental results are given to show the effectiveness of the proposed method also.",
"fno": "3354a168",
"keywords": [
"Calibration",
"Cameras",
"Measurement By Laser Beam",
"Pose Estimation",
"Position Measurement",
"Nonoverlapping Cameras",
"Laser Pointer",
"General Camera Model",
"Calibration Chess Board",
"Relative Pose Estimation",
"Laser Calibration Board",
"Nonperspective N Points Algorithm",
"N Pn P Algorithm",
"Cameras",
"Calibration",
"Laser Modes",
"Laser Theory",
"Estimation",
"Camera Pose Estimation",
"Non Overlapping Camera",
"General Camera Model",
"Laser Pointer"
],
"authors": [
{
"affiliation": null,
"fullName": "Shigang Li",
"givenName": "Shigang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Takahiro Harada",
"givenName": "Takahiro",
"surname": "Harada",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wuhe Zou",
"givenName": "Wuhe",
"surname": "Zou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-11-01T00:00:00",
"pubType": "proceedings",
"pages": "168-172",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-3354-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3354a162",
"articleId": "17D45XwUAJ6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3354a173",
"articleId": "17D45WnnFVP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2009/3583/2/3583b479",
"title": "A PSO-Based Ball-Plate Calibration for Laser Scanner",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583b479/12OmNAPBbg3",
"parentPublication": {
"id": "proceedings/icmtma/2009/3583/2",
"title": "2009 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2009/3605/1/3605a781",
"title": "The Calibration Algorithm Between 2D Laser Range Finder and Platform",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2009/3605a781/12OmNB8kHX6",
"parentPublication": {
"id": "cso/2009/3605/1",
"title": "2009 International Joint Conference on Computational Sciences and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/is3c/2016/3071/0/3071a850",
"title": "A Laser-Based Vision System for Tire Tread Depth Inspection",
"doi": null,
"abstractUrl": "/proceedings-article/is3c/2016/3071a850/12OmNC8MsAk",
"parentPublication": {
"id": "proceedings/is3c/2016/3071/0",
"title": "2016 International Symposium on Computer, Consumer and Control (IS3C)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvs/2006/2506/0/01578737",
"title": "A Desktop 3D Scanner Exploiting Rotation and Visual Rectification of Laser Profiles",
"doi": null,
"abstractUrl": "/proceedings-article/icvs/2006/01578737/12OmNqNG3fj",
"parentPublication": {
"id": "proceedings/icvs/2006/2506/0",
"title": "Fourth IEEE International Conference on Computer Vision Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2006/2825/0/04155728",
"title": "Self-Calibration of Multiple Laser Planes for 3D Scene Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2006/04155728/12OmNwI8caf",
"parentPublication": {
"id": "proceedings/3dpvt/2006/2825/0",
"title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/06977432",
"title": "Calibrating Non-overlapping RGB-D Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977432/12OmNxE2mOA",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2011/4296/1/4296a666",
"title": "Calibration Method of Three Dimensional (3D) Laser Measurement System Based on Projective Transformation",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2011/4296a666/12OmNyaoDy2",
"parentPublication": {
"id": "proceedings/icmtma/2011/4296/1",
"title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671799",
"title": "In-situ interactive modeling using a single-point laser rangefinder coupled with a new hybrid orientation tracker",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671799/12OmNz61dzi",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523389",
"title": "Spatio-Temporal Point Path Analysis and Optimization of a Galvanoscopic Scanning Laser Projector",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523389/13rRUxBJhvz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2018/8481/0/848100a196",
"title": "Simulation and Experimental Study of Electrolyte Jet-Assisted Laser Micromachining and Punching",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2018/848100a196/17D45XlyDvh",
"parentPublication": {
"id": "proceedings/icmcce/2018/8481/0",
"title": "2018 3rd International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1APq29uUhdm",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"acronym": "icvisp",
"groupId": "1823144",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1APq91z67bq",
"doi": "10.1109/ICVISP54630.2021.00052",
"title": "Slope Deformation Analysis and Research Based on 3D Laser Scanning Technology",
"normalizedTitle": "Slope Deformation Analysis and Research Based on 3D Laser Scanning Technology",
"abstract": "Aiming at the characteristics of traditional monitoring methods of slope deformation, such as low efficiency of single point measurement, limited operation area, time-consuming and laborious and difficulty in analyzing the overall deformation of slope. Taking the high cutting slope of a certain ramp section of suizi expressway as the research object, 3D laser scanning technology was used to scan the slope and obtain 3D point cloud data of the slope in different periods. The original data were pre-processed by point cloud registration, noise reduction, algorithm filtering and so on to obtain a 3D fined model, so as to realize the overall deformation analysis of the slope and the key monitoring of characteristic parts. The results show that 3D laser scanning technology can obtain more comprehensive 3D spatial information of the slope and realize the high efficiency and high precision monitoring of slope deformation, which is of significance for the real-time monitoring of slope deformation and disaster early warning.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Aiming at the characteristics of traditional monitoring methods of slope deformation, such as low efficiency of single point measurement, limited operation area, time-consuming and laborious and difficulty in analyzing the overall deformation of slope. Taking the high cutting slope of a certain ramp section of suizi expressway as the research object, 3D laser scanning technology was used to scan the slope and obtain 3D point cloud data of the slope in different periods. The original data were pre-processed by point cloud registration, noise reduction, algorithm filtering and so on to obtain a 3D fined model, so as to realize the overall deformation analysis of the slope and the key monitoring of characteristic parts. The results show that 3D laser scanning technology can obtain more comprehensive 3D spatial information of the slope and realize the high efficiency and high precision monitoring of slope deformation, which is of significance for the real-time monitoring of slope deformation and disaster early warning.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Aiming at the characteristics of traditional monitoring methods of slope deformation, such as low efficiency of single point measurement, limited operation area, time-consuming and laborious and difficulty in analyzing the overall deformation of slope. Taking the high cutting slope of a certain ramp section of suizi expressway as the research object, 3D laser scanning technology was used to scan the slope and obtain 3D point cloud data of the slope in different periods. The original data were pre-processed by point cloud registration, noise reduction, algorithm filtering and so on to obtain a 3D fined model, so as to realize the overall deformation analysis of the slope and the key monitoring of characteristic parts. The results show that 3D laser scanning technology can obtain more comprehensive 3D spatial information of the slope and realize the high efficiency and high precision monitoring of slope deformation, which is of significance for the real-time monitoring of slope deformation and disaster early warning.",
"fno": "077000a248",
"keywords": [
"Data Visualisation",
"Deformation",
"Disasters",
"Image Registration",
"Medical Image Processing",
"Optical Scanners",
"Solid Modelling",
"High Cutting Slope",
"3 D Laser Scanning Technology",
"3 D Point Cloud Data",
"Point Cloud Registration",
"3 D Fined Model",
"Comprehensive 3 D Spatial Information",
"Slope Deformation Analysis",
"Traditional Monitoring Methods",
"Single Point Measurement",
"Road Transportation",
"Point Cloud Compression",
"Solid Modeling",
"Three Dimensional Displays",
"Filtering",
"Laser Noise",
"Laser Modes",
"3 D Laser Scanning",
"Deformation Analysis",
"Slope Monitoring",
"Plane Fitting"
],
"authors": [
{
"affiliation": "Chongqing Jiaotong University,School of Civil Engineering,Chongqing,China,400074",
"fullName": "Xiaofang Xue",
"givenName": "Xiaofang",
"surname": "Xue",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chongqing Jiaotong University,School of Civil Engineering,Chongqing,China,400074",
"fullName": "Jieming Guo",
"givenName": "Jieming",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
          "affiliation": "Chongqing Smart City and Sustainable Development Academy,Chongqing,China,401135",
"fullName": "Chunli Ying",
"givenName": "Chunli",
"surname": "Ying",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Southwest Jiaotong University,School of Civil Engineering,Chengdu,China,611756",
"fullName": "Yanhui Liu",
"givenName": "Yanhui",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Southeast University,School of Civil Engineering,Nanjing,China,211189",
"fullName": "Tong Guo",
"givenName": "Tong",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "OsloMet,Department of Computer Science,Oslo,Norway,0130",
"fullName": "Qian Meng",
"givenName": "Qian",
"surname": "Meng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fifth Engineering Company, 16th Bureau of China Railway,Wuyin Project Manager Department,Yinchuan,China,751400",
"fullName": "Yupeng Yang",
"givenName": "Yupeng",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Technology & Art and Design, OsloMet,Oslo,Norway,0130",
"fullName": "Daguang Han",
"givenName": "Daguang",
"surname": "Han",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvisp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-12-01T00:00:00",
"pubType": "proceedings",
"pages": "248-252",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0770-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "077000a242",
"articleId": "1APq2ZEyD0k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "077000a253",
"articleId": "1APq2jVO6Eo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/geows/2009/3527/0/04782686",
"title": "Recording and Modeling Paleolithic Caves through Laser Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/geows/2009/04782686/12OmNBIFmuQ",
"parentPublication": {
"id": "proceedings/geows/2009/3527/0",
"title": "Advanced Geographic Information Systems & Web Services, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2016/1269/0/07759977",
"title": "Efficient Nd:YAG based solar pumped single mode laser",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2016/07759977/12OmNvDI3Ml",
"parentPublication": {
"id": "proceedings/iciev/2016/1269/0",
"title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcse/2010/4303/2/05718400",
"title": "Theoretical Research on Adjacent Multi-station Point Cloud Data Precision Combination in 3D Laser Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/wcse/2010/05718400/12OmNvnOwuA",
"parentPublication": {
"id": "proceedings/wcse/2010/4303/2",
"title": "2010 Second World Congress on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisp/2008/3119/2/3119b349",
"title": "Cultural Relic 3D Reconstruction from Digital Images and Laser Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cisp/2008/3119b349/12OmNzvQHS4",
"parentPublication": {
"id": "proceedings/cisp/2008/3119/3",
"title": "Image and Signal Processing, Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2021/0770/0/077000a253",
"title": "Study on the Application of 3-D Laser Scanning BIM Technology in Optimization and Adjustment of Factory Pipeline System",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a253/1APq2jVO6Eo",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaice/2021/2186/0/218600a666",
"title": "3D Reconstruction Using a Linear Laser Scanner and A Camera",
"doi": null,
"abstractUrl": "/proceedings-article/icaice/2021/218600a666/1Et4KYrcVwc",
"parentPublication": {
"id": "proceedings/icaice/2021/2186/0",
"title": "2021 2nd International Conference on Artificial Intelligence and Computer Engineering (ICAICE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmiae/2022/7396/0/739600a174",
"title": "Application research of 3D laser scanning technology in hull rib assembly",
"doi": null,
"abstractUrl": "/proceedings-article/icmiae/2022/739600a174/1JgrH02LtDO",
"parentPublication": {
"id": "proceedings/icmiae/2022/7396/0",
"title": "2022 International Conference on Manufacturing, Industrial Automation and Electronics (ICMIAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscsic/2022/5488/0/548800a352",
"title": "Transmission Line Modeling Based on 3D Laser Scanning Point Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/iscsic/2022/548800a352/1LvAk6oM9DG",
"parentPublication": {
"id": "proceedings/iscsic/2022/5488/0",
"title": "2022 6th International Symposium on Computer Science and Intelligent Control (ISCSIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itca/2020/0378/0/037800a303",
"title": "Rapid elimination of noise in 3D laser scanning point cloud data",
"doi": null,
"abstractUrl": "/proceedings-article/itca/2020/037800a303/1tpBgAzfU1G",
"parentPublication": {
"id": "proceedings/itca/2020/0378/0",
"title": "2020 2nd International Conference on Information Technology and Computer Application (ITCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciddt/2020/0367/0/036700a083",
"title": "A Laser 3D Animation Optimization System Based on Virtual Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/iciddt/2020/036700a083/1wutLnjjLhK",
"parentPublication": {
"id": "proceedings/iciddt/2020/0367/0",
"title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1JgrDl33Nv2",
"title": "2022 International Conference on Manufacturing, Industrial Automation and Electronics (ICMIAE)",
"acronym": "icmiae",
"groupId": "9980888",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JgrH02LtDO",
"doi": "10.1109/ICMIAE57032.2022.00040",
"title": "Application research of 3D laser scanning technology in hull rib assembly",
"normalizedTitle": "Application research of 3D laser scanning technology in hull rib assembly",
"abstract": "The traditional measurement methods and techniques cannot satisfy the accuracy requirements of the floor pull-in method, which is mainly manifested in the incomplete measurement data and low efficiency. To some extent, it restricts the actual demand for floor pulling in assembly. Based on this, this paper discusses the application of 3D laser scanning technology and virtual simulation technology in the assembly accuracy management of hull rib plates. The acquisition and processing method of rib point cloud data based on 3D laser scanning is studied. Rib error detection and virtual assembly simulation research were carried out based on a reverse model. Good application results have been achieved. This research provides a new way and technical means to realize fast detection and predictive correction of assembly errors during rib assembly, which can provide reference and technical support for improving the accuracy and success of the floor pull-in method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The traditional measurement methods and techniques cannot satisfy the accuracy requirements of the floor pull-in method, which is mainly manifested in the incomplete measurement data and low efficiency. To some extent, it restricts the actual demand for floor pulling in assembly. Based on this, this paper discusses the application of 3D laser scanning technology and virtual simulation technology in the assembly accuracy management of hull rib plates. The acquisition and processing method of rib point cloud data based on 3D laser scanning is studied. Rib error detection and virtual assembly simulation research were carried out based on a reverse model. Good application results have been achieved. This research provides a new way and technical means to realize fast detection and predictive correction of assembly errors during rib assembly, which can provide reference and technical support for improving the accuracy and success of the floor pull-in method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The traditional measurement methods and techniques cannot satisfy the accuracy requirements of the floor pull-in method, which is mainly manifested in the incomplete measurement data and low efficiency. To some extent, it restricts the actual demand for floor pulling in assembly. Based on this, this paper discusses the application of 3D laser scanning technology and virtual simulation technology in the assembly accuracy management of hull rib plates. The acquisition and processing method of rib point cloud data based on 3D laser scanning is studied. Rib error detection and virtual assembly simulation research were carried out based on a reverse model. Good application results have been achieved. This research provides a new way and technical means to realize fast detection and predictive correction of assembly errors during rib assembly, which can provide reference and technical support for improving the accuracy and success of the floor pull-in method.",
"fno": "739600a174",
"keywords": [
"Assembling",
"CAD",
"Optical Scanners",
"Plates Structures",
"Production Engineering Computing",
"Solid Modelling",
"Virtual Manufacturing",
"Virtual Reality",
"3 D Laser Scanning Technology",
"Assembly Accuracy Management",
"Assembly Errors",
"Floor Pulling",
"Good Application Results",
"Hull Rib Assembly",
"Hull Rib Plates",
"Incomplete Measurement Data",
"Rib Error Detection",
"Rib Point Cloud Data",
"Traditional Measurement Methods",
"Virtual Assembly Simulation Research",
"Virtual Simulation Technology",
"Point Cloud Compression",
"Solid Modeling",
"Three Dimensional Displays",
"Measurement By Laser Beam",
"Predictive Models",
"Laser Modes",
"Marine Vehicles",
"Ship Construction",
"3 D Laser Scanning",
"Rib Plate",
"Assembly Simulation",
"Error Analysis"
],
"authors": [
{
"affiliation": "Shanghai Waigaoqiao Free Trade Zone Group Co., LTD,Shanghai,China",
"fullName": "Chuanhe Wang",
"givenName": "Chuanhe",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jiangsu University of Science and Technology,Zhenjiang,China",
"fullName": "Dazhi Zhou",
"givenName": "Dazhi",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jiangsu University of Science and Technology,Zhenjiang,China",
"fullName": "Baochen Mao",
"givenName": "Baochen",
"surname": "Mao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Waigaoqiao Free Trade Zone Group Co., LTD,Shanghai,China",
"fullName": "Jieqiang Liu",
"givenName": "Jieqiang",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jiangsu University of Science and Technology,Zhenjiang,China",
"fullName": "Honggen Zhou",
"givenName": "Honggen",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jiangsu University of Science and Technology,Zhenjiang,China",
"fullName": "Jinfeng Liu",
"givenName": "Jinfeng",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmiae",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "174-178",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-7396-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "739600a170",
"articleId": "1JgrNrpwH1C",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "739600a179",
"articleId": "1JgrNIx8tuU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dsd/2017/2146/0/2146a252",
"title": "Role of Laser-Induced IR Drops in the Occurrence of Faults: Assessment and Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/dsd/2017/2146a252/12OmNAObbHn",
"parentPublication": {
"id": "proceedings/dsd/2017/2146/0",
"title": "2017 Euromicro Conference on Digital System Design (DSD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/geows/2009/3527/0/04782686",
"title": "Recording and Modeling Paleolithic Caves through Laser Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/geows/2009/04782686/12OmNBIFmuQ",
"parentPublication": {
"id": "proceedings/geows/2009/3527/0",
"title": "Advanced Geographic Information Systems & Web Services, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/is3c/2016/3071/0/3071a850",
"title": "A Laser-Based Vision System for Tire Tread Depth Inspection",
"doi": null,
"abstractUrl": "/proceedings-article/is3c/2016/3071a850/12OmNC8MsAk",
"parentPublication": {
"id": "proceedings/is3c/2016/3071/0",
"title": "2016 International Symposium on Computer, Consumer and Control (IS3C)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itc/2014/4722/0/07035329",
"title": "IC laser trimming speed-up through wafer-level spatial correlation modeling",
"doi": null,
"abstractUrl": "/proceedings-article/itc/2014/07035329/12OmNwAt1Fq",
"parentPublication": {
"id": "proceedings/itc/2014/4722/0",
"title": "2014 IEEE International Test Conference (ITC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671799",
"title": "In-situ interactive modeling using a single-point laser rangefinder coupled with a new hybrid orientation tracker",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671799/12OmNz61dzi",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2021/2172/0/217200a113",
"title": "A scheme and kinematics analysis of an assembly vehicle for terminal optical component of a laser fusion device",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2021/217200a113/1ANLqDRi7cs",
"parentPublication": {
"id": "proceedings/wcmeim/2021/2172/0",
"title": "2021 4th World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2021/0770/0/077000a253",
"title": "Study on the Application of 3-D Laser Scanning BIM Technology in Optimization and Adjustment of Factory Pipeline System",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a253/1APq2jVO6Eo",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/qe/2022/01/09964063",
"title": "The “Squeeze Laser”",
"doi": null,
"abstractUrl": "/journal/qe/2022/01/09964063/1IAFLDGVVVm",
"parentPublication": {
"id": "trans/qe",
"title": "IEEE Transactions on Quantum Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2019/4689/0/468900a470",
"title": "Design of a Stroboscopic Laser Grating Stripe Projection Device",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2019/468900a470/1h0FgohMNG0",
"parentPublication": {
"id": "proceedings/icmcce/2019/4689/0",
"title": "2019 4th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsmt/2020/8668/0/866800a009",
"title": "SLAM Global Positioning Algorithm Based on Laser and Vision Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/iccsmt/2020/866800a009/1u8pDO7YPzG",
"parentPublication": {
"id": "proceedings/iccsmt/2020/8668/0",
"title": "2020 International Conference on Computer Science and Management Technology (ICCSMT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1LiNSD4FqLu",
"title": "2022 IEEE/ACIS 23rd International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)",
"acronym": "snpd",
"groupId": "10051724",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1LiNTpX4Zqg",
"doi": "10.1109/SNPD54884.2022.10051809",
"title": "Realization of Laser Object Vaporization Locating Based on Low-Cost 2D LiDAR",
"normalizedTitle": "Realization of Laser Object Vaporization Locating Based on Low-Cost 2D LiDAR",
"abstract": "This research focuses on integrating low-cost 2D-Lidar into a multi-split laser vaporization system that can incinerate wastes and general objects to improve the problem of manual alignment of the laser vaporization machine. In the experiment, we proposed a custom angle range and a distance threshold screening method for 2D-Lidar to distinguish whether there is an object in the scanning area and object locating. Through the law of cosine, the 2D-Lidar scanning angle is evenly distributed to the multi-split laser vaporization system. The object scanning method uses the Modbus TCP communication mode to enable the 2D-Lidar on the PC side to handshaking with the PLC on the central control side of the system. Scanning and size calculation of the object can obtain the position point of the object and hence improving the efficiency of the multi-split laser vaporization system. In addition, 2D lidar is readily available and inexpensive, in which making it more cost-effective than 3D-Lidar.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This research focuses on integrating low-cost 2D-Lidar into a multi-split laser vaporization system that can incinerate wastes and general objects to improve the problem of manual alignment of the laser vaporization machine. In the experiment, we proposed a custom angle range and a distance threshold screening method for 2D-Lidar to distinguish whether there is an object in the scanning area and object locating. Through the law of cosine, the 2D-Lidar scanning angle is evenly distributed to the multi-split laser vaporization system. The object scanning method uses the Modbus TCP communication mode to enable the 2D-Lidar on the PC side to handshaking with the PLC on the central control side of the system. Scanning and size calculation of the object can obtain the position point of the object and hence improving the efficiency of the multi-split laser vaporization system. In addition, 2D lidar is readily available and inexpensive, in which making it more cost-effective than 3D-Lidar.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This research focuses on integrating low-cost 2D-Lidar into a multi-split laser vaporization system that can incinerate wastes and general objects to improve the problem of manual alignment of the laser vaporization machine. In the experiment, we proposed a custom angle range and a distance threshold screening method for 2D-Lidar to distinguish whether there is an object in the scanning area and object locating. Through the law of cosine, the 2D-Lidar scanning angle is evenly distributed to the multi-split laser vaporization system. The object scanning method uses the Modbus TCP communication mode to enable the 2D-Lidar on the PC side to handshaking with the PLC on the central control side of the system. Scanning and size calculation of the object can obtain the position point of the object and hence improving the efficiency of the multi-split laser vaporization system. In addition, 2D lidar is readily available and inexpensive, in which making it more cost-effective than 3D-Lidar.",
"fno": "10051809",
"keywords": [
"Incineration",
"Optical Radar",
"Programmable Controllers",
"Transport Protocols",
"2 D Lidar Scanning Angle",
"3 D Lidar",
"General Objects",
"Laser Object Vaporization Locating",
"Laser Vaporization Machine",
"Low Cost 2 D Li DAR",
"Low Cost 2 D Lidar",
"Multisplit Laser Vaporization System",
"Object Locating",
"Object Scanning Method",
"Location Awareness",
"Laser Radar",
"Manuals",
"Laser Modes",
"Artificial Intelligence",
"Software Engineering",
"Laser",
"Li DAR",
"Scanning",
"Object Localization"
],
"authors": [
{
"affiliation": "National Chin-Yi University of Technology,Dept. of Electronic Engineering,Taichung,Taiwan",
"fullName": "Meng-Sheng Tsai",
"givenName": "Meng-Sheng",
"surname": "Tsai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chin-Yi University of Technology,Dept. of Electronic Engineering,Taichung,Taiwan",
"fullName": "Yuan-Hong Guan",
"givenName": "Yuan-Hong",
"surname": "Guan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chin-Yi University of Technology,Dept. of Electronic Engineering,Taichung,Taiwan",
"fullName": "You-Xuan Lin",
"givenName": "You-Xuan",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chin-Yi University of Technology,Dept. of Electronic Engineering,Taichung,Taiwan",
"fullName": "Meng-Hua Yen",
"givenName": "Meng-Hua",
"surname": "Yen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "snpd",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-12-01T00:00:00",
"pubType": "proceedings",
"pages": "140-143",
"year": "2022",
"issn": null,
"isbn": "979-8-3503-1041-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "10051800",
"articleId": "1LiNUI7CQ80",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10051785",
"articleId": "1LiNVSHPmiQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2009/3992/0/05206539",
"title": "Automatic registration of LIDAR and optical images of urban scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206539/12OmNB8CiV9",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mascots/2009/4927/0/05366801",
"title": "A linear method for calibrating LIDAR-and-camera systems",
"doi": null,
"abstractUrl": "/proceedings-article/mascots/2009/05366801/12OmNBC8ACp",
"parentPublication": {
"id": "proceedings/mascots/2009/4927/0",
"title": "2009 IEEE International Symposium on Modeling, Analysis & Simulation of Computer and Telecommunication Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cca/2000/6562/0/00897483",
"title": "Development of a continuous scanning laser Doppler vibrometer for vibration mode shape analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cca/2000/00897483/12OmNweBUDb",
"parentPublication": {
"id": "proceedings/cca/2000/6562/0",
"title": "Proceedings of the 2000 IEEE International Conference on Control Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460601",
"title": "Real-time 2D video/3D LiDAR registration",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460601/12OmNyL0THp",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itng/2015/8828/0/8828a674",
"title": "An Obstacle Classification Method Using Multi-feature Comparison Based on 2D LIDAR Database",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2015/8828a674/12OmNzb7ZsR",
"parentPublication": {
"id": "proceedings/itng/2015/8828/0",
"title": "2015 12th International Conference on Information Technology - New Generations (ITNG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200d743",
"title": "Multi-Echo LiDAR for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d743/1BmFBHEQzG8",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2022/5851/0/09887683",
"title": "First Arrival Differential LiDAR",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2022/09887683/1GZitYnVKso",
"parentPublication": {
"id": "proceedings/iccp/2022/5851/0",
"title": "2022 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600r7000",
"title": "Raw High-Definition Radar for Multi-Task Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600r7000/1H1hxUN5Rao",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccgiv/2022/9250/0/925000a001",
"title": "A novel Loop Closure Approach in 2D LIDAR SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccgiv/2022/925000a001/1Lxfozxuous",
"parentPublication": {
"id": "proceedings/iccgiv/2022/9250/0",
"title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090401",
"title": "Learning to Match 2D Images and 3D LiDAR Point Clouds for Outdoor Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090401/1jIxmhXvH7a",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tpB6mNNDaM",
"title": "2020 2nd International Conference on Information Technology and Computer Application (ITCA)",
"acronym": "itca",
"groupId": "1836624",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1tpBgAzfU1G",
"doi": "10.1109/ITCA52113.2020.00071",
"title": "Rapid elimination of noise in 3D laser scanning point cloud data",
"normalizedTitle": "Rapid elimination of noise in 3D laser scanning point cloud data",
"abstract": "When using a hand-held 3D laser scanner to collect target object data, due to factors such as personnel operation, collection environment and equipment itself, a large number of external noise points are often produced. This will seriously affect the processing and reconstruction accuracy of later point cloud data. According to the data analysis, these external noise points are divided into two categories: flying points and cluster points. Aiming at this phenomenon, a point cloud model noise removal algorithm combining statistical filtering and pass-through filtering is proposed. Firstly, the flying points are eliminated by statistical filtering, and then the remaining large area cluster points are removed by through filtering. The experimental results show that the algorithm can quickly and accurately identify external noise points and eliminate them completely.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When using a hand-held 3D laser scanner to collect target object data, due to factors such as personnel operation, collection environment and equipment itself, a large number of external noise points are often produced. This will seriously affect the processing and reconstruction accuracy of later point cloud data. According to the data analysis, these external noise points are divided into two categories: flying points and cluster points. Aiming at this phenomenon, a point cloud model noise removal algorithm combining statistical filtering and pass-through filtering is proposed. Firstly, the flying points are eliminated by statistical filtering, and then the remaining large area cluster points are removed by through filtering. The experimental results show that the algorithm can quickly and accurately identify external noise points and eliminate them completely.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When using a hand-held 3D laser scanner to collect target object data, due to factors such as personnel operation, collection environment and equipment itself, a large number of external noise points are often produced. This will seriously affect the processing and reconstruction accuracy of later point cloud data. According to the data analysis, these external noise points are divided into two categories: flying points and cluster points. Aiming at this phenomenon, a point cloud model noise removal algorithm combining statistical filtering and pass-through filtering is proposed. Firstly, the flying points are eliminated by statistical filtering, and then the remaining large area cluster points are removed by through filtering. The experimental results show that the algorithm can quickly and accurately identify external noise points and eliminate them completely.",
"fno": "037800a303",
"keywords": [
"Data Analysis",
"Image Denoising",
"Image Filtering",
"Image Reconstruction",
"Optical Scanners",
"Statistical Analysis",
"Target Object Data",
"Data Analysis",
"Flying Points",
"Point Cloud Model Noise Removal Algorithm",
"Pass Through Filtering",
"Hand Held 3 D Laser Scanner",
"3 D Laser Scanning Point Cloud Data",
"External Noise Points",
"Statistical Filtering",
"Visualization",
"Three Dimensional Displays",
"Filtering",
"Laser Noise",
"Clustering Algorithms",
"Laser Modes",
"Visual Databases",
"3 D Laser Scanning",
"External Noise Points",
"Statistical Filtering",
"Pass Through Filtering"
],
"authors": [
{
"affiliation": "Inner Mongolia Agricultural University,College of Computer and Information Engineering,Hohhot,China",
"fullName": "Wang Weijie",
"givenName": "Wang",
"surname": "Weijie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inner Mongolia Agricultural University,College of Computer and Information Engineering,Hohhot,China",
"fullName": "Xue Hera",
"givenName": "Xue",
"surname": "Hera",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inner Mongolia Agricultural University,College of Computer and Information Engineering,Hohhot,China",
"fullName": "Zhou Yanqing",
"givenName": "Zhou",
"surname": "Yanqing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inner Mongolia Agricultural University,College of Computer and Information Engineering,Hohhot,China",
"fullName": "Yang Tong",
"givenName": "Yang",
"surname": "Tong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "itca",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "303-306",
"year": "2020",
"issn": null,
"isbn": "978-1-6654-0378-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "037800a298",
"articleId": "1tpBj0oCAN2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "037800a307",
"articleId": "1tpBdq0jVtK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/geows/2009/3527/0/04782686",
"title": "Recording and Modeling Paleolithic Caves through Laser Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/geows/2009/04782686/12OmNBIFmuQ",
"parentPublication": {
"id": "proceedings/geows/2009/3527/0",
"title": "Advanced Geographic Information Systems & Web Services, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcse/2010/4303/2/05718400",
"title": "Theoretical Research on Adjacent Multi-station Point Cloud Data Precision Combination in 3D Laser Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/wcse/2010/05718400/12OmNvnOwuA",
"parentPublication": {
"id": "proceedings/wcse/2010/4303/2",
"title": "2010 Second World Congress on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvd/2006/2502/0/01581478",
"title": "Efficient techniques for noise characterization of sequential cells and macros",
"doi": null,
"abstractUrl": "/proceedings-article/icvd/2006/01581478/12OmNxecRZ7",
"parentPublication": {
"id": "proceedings/icvd/2006/2502/0",
"title": "19th International Conference on VLSI Design held jointly with 5th International Conference on Embedded Systems Design (VLSID'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ijcbs/2009/3739/0/3739a569",
"title": "Kalman Filtering Design Based on Real-Time Updating of Noise Matrix",
"doi": null,
"abstractUrl": "/proceedings-article/ijcbs/2009/3739a569/12OmNyvGyos",
"parentPublication": {
"id": "proceedings/ijcbs/2009/3739/0",
"title": "2009 International Joint Conference on Bioinformatics, Systems Biology and Intelligent Computing (IJCBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2021/0770/0/077000a248",
"title": "Slope Deformation Analysis and Research Based on 3D Laser Scanning Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a248/1APq91z67bq",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ainit/2021/1296/0/129600a527",
"title": "Influence of reflected light from external cavity on distributed feedback fiber laser hydrophone",
"doi": null,
"abstractUrl": "/proceedings-article/ainit/2021/129600a527/1BzWyQFws00",
"parentPublication": {
"id": "proceedings/ainit/2021/1296/0",
"title": "2021 2nd International Seminar on Artificial Intelligence, Networking and Information Technology (AINIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acctcs/2022/0034/0/003400a374",
"title": "Mitigation Methods of Short-time Diurnal Magnetic Noise in Airborne Magnetic Survey",
"doi": null,
"abstractUrl": "/proceedings-article/acctcs/2022/003400a374/1F6Us6iJrDG",
"parentPublication": {
"id": "proceedings/acctcs/2022/0034/0",
"title": "2022 2nd Asia-Pacific Conference on Communications Technology and Computer Science (ACCTCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/qe/2022/01/09964063",
"title": "The “Squeeze Laser”",
"doi": null,
"abstractUrl": "/journal/qe/2022/01/09964063/1IAFLDGVVVm",
"parentPublication": {
"id": "trans/qe",
"title": "IEEE Transactions on Quantum Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a123",
"title": "Point Cloud Denoising Algorithm Based on Noise Classification",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a123/1p1gtJQEUPC",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsmt/2020/8668/0/866800a009",
"title": "SLAM Global Positioning Algorithm Based on Laser and Vision Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/iccsmt/2020/866800a009/1u8pDO7YPzG",
"parentPublication": {
"id": "proceedings/iccsmt/2020/8668/0",
"title": "2020 International Conference on Computer Science and Management Technology (ICCSMT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyjLoRw",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB8TUim",
"doi": "10.1109/ISMAR.2014.6948513",
"title": "Google glass, The META and Co. How to calibrate optical see-through head mounted displays",
"normalizedTitle": "Google glass, The META and Co. How to calibrate optical see-through head mounted displays",
"abstract": "Head Mounted Displays such as Google Glass and the META have the potential to spur consumer-oriented Optical See-Through Augmented Reality applications. A correct spatial registration of those displays relative to a user's eye(s) is an essential problem for any HMD-based AR application. We provide an overview of established and novel approaches for the calibration of those displays including hands on experience in which participants will calibrate such head mounted displays. The following list provides a tentative list of topics covered during the tutorial. • Part 1: Introduction to OST calibration • Why OST Calibration is important? • Differences to Camera Calibration • Introduce camera calibration • Why is OST calibration hard? • The user in the loop — pointing accuracy • Slipping, the need for recalibration • Principal aspects of OST-HMD calibration • overview of data collection • confirmation methods • optimization • mono vs stereo • Details of OST calibration • Data collection methods: • SPAAM, Multi Point collection, stereo methods • Confirmation methods • Optimization approaches • Evaluation: perceptual measures vs. analytic measures • State of the art: Semi-, fully automatic calibration methodses • Part 2: Hands-on calibration • SPAAM-based calibration of Epson Moverio/ Google Glass with inside-out marker tracker.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Head Mounted Displays such as Google Glass and the META have the potential to spur consumer-oriented Optical See-Through Augmented Reality applications. A correct spatial registration of those displays relative to a user's eye(s) is an essential problem for any HMD-based AR application. We provide an overview of established and novel approaches for the calibration of those displays including hands on experience in which participants will calibrate such head mounted displays. The following list provides a tentative list of topics covered during the tutorial. • Part 1: Introduction to OST calibration • Why OST Calibration is important? • Differences to Camera Calibration • Introduce camera calibration • Why is OST calibration hard? • The user in the loop — pointing accuracy • Slipping, the need for recalibration • Principal aspects of OST-HMD calibration • overview of data collection • confirmation methods • optimization • mono vs stereo • Details of OST calibration • Data collection methods: • SPAAM, Multi Point collection, stereo methods • Confirmation methods • Optimization approaches • Evaluation: perceptual measures vs. analytic measures • State of the art: Semi-, fully automatic calibration methodses • Part 2: Hands-on calibration • SPAAM-based calibration of Epson Moverio/ Google Glass with inside-out marker tracker.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Head Mounted Displays such as Google Glass and the META have the potential to spur consumer-oriented Optical See-Through Augmented Reality applications. A correct spatial registration of those displays relative to a user's eye(s) is an essential problem for any HMD-based AR application. We provide an overview of established and novel approaches for the calibration of those displays including hands on experience in which participants will calibrate such head mounted displays. The following list provides a tentative list of topics covered during the tutorial. • Part 1: Introduction to OST calibration • Why OST Calibration is important? • Differences to Camera Calibration • Introduce camera calibration • Why is OST calibration hard? • The user in the loop — pointing accuracy • Slipping, the need for recalibration • Principal aspects of OST-HMD calibration • overview of data collection • confirmation methods • optimization • mono vs stereo • Details of OST calibration • Data collection methods: • SPAAM, Multi Point collection, stereo methods • Confirmation methods • Optimization approaches • Evaluation: perceptual measures vs. analytic measures • State of the art: Semi-, fully automatic calibration methodses • Part 2: Hands-on calibration • SPAAM-based calibration of Epson Moverio/ Google Glass with inside-out marker tracker.",
"fno": "06948513",
"keywords": [
"Calibration",
"Educational Institutions",
"Google",
"Glass",
"Adaptive Optics",
"Optical Feedback",
"Augmented Reality"
],
"authors": [
{
"affiliation": "Graz University of Technology, Austria, Yuta Itoh, TU Munich, Germany",
"fullName": "Jens Grubert",
"givenName": "Jens",
"surname": "Grubert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1-2",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6184-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06948512",
"articleId": "12OmNy68EMC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06948514",
"articleId": "12OmNvDqsS1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504693",
"title": "A calibration method for optical see-through head-mounted displays with a depth camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504693/12OmNAnMuMd",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798846",
"title": "Interaction-free calibration for optical see-through head-mounted displays based on 3D Eye localization",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798846/12OmNCdBDWL",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223450",
"title": "Evaluating optical see-through head-mounted display calibration via frustum visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223450/12OmNrAv3Ap",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836487",
"title": "Reduction of Interaction Space in Single Point Active Alignment Method for Optical See-Through Head-Mounted Display Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836487/12OmNyRg4AG",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2011/2183/0/06162910",
"title": "An empiric evaluation of confirmation methods for optical see-through head-mounted display calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2011/06162910/12OmNzwpUfP",
"parentPublication": {
"id": "proceedings/ismar/2011/2183/0",
"title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446429",
"title": "Impact of Alignment Point Distance Distribution on SPAAM Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446429/13bd1gCd7Sz",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07021939",
"title": "Subjective Evaluation of a Semi-Automatic Optical See-Through Head-Mounted Display Calibration Technique",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07021939/13rRUwInvyB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523375",
"title": "Gaussian Light Field: Estimation of Viewpoint-Dependent Blur for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523375/13rRUxYINfi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a021",
"title": "Impact of Alignment Point Distance and Posture on SPAAM Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a021/17D45WaTkli",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyjLoRw",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzaQoFo",
"doi": "10.1109/ISMAR.2014.6948426",
"title": "SmartColor: Real-time color correction and contrast for optical see-through head-mounted displays",
"normalizedTitle": "SmartColor: Real-time color correction and contrast for optical see-through head-mounted displays",
"abstract": "Users of optical see-through head-mounted displays (OHMD) perceive color as a blend of the display color and the background. Color-blending is a major usability challenge as it leads to loss of color encodings and poor text legibility. Color correction aims at mitigating color blending by producing an alternative color which, when blended with the background, more closely approaches the color originally intended. To date, approaches to color correction do not yield optimal results or do not work in real-time. This paper makes two contributions. First, we present QuickCorrection, a realtime color correction algorithm based on display profiles. We describe the algorithm, measure its accuracy and analyze two implementations for the OpenGL graphics pipeline. Second, we present SmartColor, a middleware for color management of userinterface components in OHMD. SmartColor uses color correction to provide three management strategies: correction, contrast, and show-up-on-contrast. Correction determines the alternate color which best preserves the original color. Contrast determines the color which best warranties text legibility while preserving as much of the original hue. Show-up-on-contrast makes a component visible when a related component does not have enough contrast to be legible. We describe the SmartColor's architecture and illustrate the color strategies for various types of display content.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Users of optical see-through head-mounted displays (OHMD) perceive color as a blend of the display color and the background. Color-blending is a major usability challenge as it leads to loss of color encodings and poor text legibility. Color correction aims at mitigating color blending by producing an alternative color which, when blended with the background, more closely approaches the color originally intended. To date, approaches to color correction do not yield optimal results or do not work in real-time. This paper makes two contributions. First, we present QuickCorrection, a realtime color correction algorithm based on display profiles. We describe the algorithm, measure its accuracy and analyze two implementations for the OpenGL graphics pipeline. Second, we present SmartColor, a middleware for color management of userinterface components in OHMD. SmartColor uses color correction to provide three management strategies: correction, contrast, and show-up-on-contrast. Correction determines the alternate color which best preserves the original color. Contrast determines the color which best warranties text legibility while preserving as much of the original hue. Show-up-on-contrast makes a component visible when a related component does not have enough contrast to be legible. We describe the SmartColor's architecture and illustrate the color strategies for various types of display content.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Users of optical see-through head-mounted displays (OHMD) perceive color as a blend of the display color and the background. Color-blending is a major usability challenge as it leads to loss of color encodings and poor text legibility. Color correction aims at mitigating color blending by producing an alternative color which, when blended with the background, more closely approaches the color originally intended. To date, approaches to color correction do not yield optimal results or do not work in real-time. This paper makes two contributions. First, we present QuickCorrection, a realtime color correction algorithm based on display profiles. We describe the algorithm, measure its accuracy and analyze two implementations for the OpenGL graphics pipeline. Second, we present SmartColor, a middleware for color management of userinterface components in OHMD. SmartColor uses color correction to provide three management strategies: correction, contrast, and show-up-on-contrast. Correction determines the alternate color which best preserves the original color. Contrast determines the color which best warranties text legibility while preserving as much of the original hue. Show-up-on-contrast makes a component visible when a related component does not have enough contrast to be legible. We describe the SmartColor's architecture and illustrate the color strategies for various types of display content.",
"fno": "06948426",
"keywords": [
"Color",
"Image Color Analysis",
"Real Time Systems",
"Algorithm Design And Analysis",
"Graphics",
"Adaptive Optics",
"Accuracy",
"Contrast",
"Head Mounted Displays",
"See Through Displays",
"Transparency",
"Color Blending",
"Correction"
],
"authors": [
{
"affiliation": "Department of Computer Science, University of Manitoba, Winnipeg, Canada",
"fullName": "Juan David Hincapie-Ramos",
"givenName": "Juan",
"surname": "David Hincapie-Ramos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University of Manitoba, Winnipeg, Canada",
"fullName": "Levko Ivanchuk",
"givenName": "Levko",
"surname": "Ivanchuk",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University of Manitoba, Winnipeg, Canada",
"fullName": "Srikanth Kirshnamachari Sridharan",
"givenName": "Srikanth Kirshnamachari",
"surname": "Sridharan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University of Manitoba, Winnipeg, Canada",
"fullName": "Pourang Irani",
"givenName": "Pourang",
"surname": "Irani",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "187-194",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6184-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06948425",
"articleId": "12OmNwCaCwB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06948427",
"articleId": "12OmNzfXaxR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460049",
"title": "SharpView: Improved clarity of defocused content on optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460049/12OmNBWzHQi",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/12/07138644",
"title": "SmartColor: Real-Time Color and Contrast Correction for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/12/07138644/13rRUwfZC0k",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064856",
"title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523376",
"title": "Real-Time Radiometric Compensation for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523376/13rRUxASu0P",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523375",
"title": "Gaussian Light Field: Estimation of Viewpoint-Dependent Blur for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523375/13rRUxYINfi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a640",
"title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10050791",
"title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10050791/1L039oS5wDm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089640",
"title": "Effects of Dark Mode Graphics on Visual Acuity and Fatigue with Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089640/1jIxgdFEoqA",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwcl7Kf",
"title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzwpUfP",
"doi": "10.1109/ISMAR.2011.6143895",
"title": "An empiric evaluation of confirmation methods for optical see-through head-mounted display calibration",
"normalizedTitle": "An empiric evaluation of confirmation methods for optical see-through head-mounted display calibration",
"abstract": "The calibration of optical see-through head-mounted displays is an important fundament for correct object alignment in augmented reality. Any calibration process for OSTHMDs requires users to align 2D points in screen space with 3D points in the real world and to confirm each alignment. In this poster, we present the results of our empiric evaluation where we compared four confirmation methods: Keyboard, Hand-held, Voice, and Waiting. The Waiting method, designed to reduce head motion during confirmation, showed a significantly higher accuracy than all other methods. Averaging over a time frame for sampling user input before the time of confirmation improved the accuracy of all methods in addition. We conducted a further expert study proving that the results achieved with a video see-through head-mounted display showed valid for optical see-through head-mounted display calibration, too.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The calibration of optical see-through head-mounted displays is an important fundament for correct object alignment in augmented reality. Any calibration process for OSTHMDs requires users to align 2D points in screen space with 3D points in the real world and to confirm each alignment. In this poster, we present the results of our empiric evaluation where we compared four confirmation methods: Keyboard, Hand-held, Voice, and Waiting. The Waiting method, designed to reduce head motion during confirmation, showed a significantly higher accuracy than all other methods. Averaging over a time frame for sampling user input before the time of confirmation improved the accuracy of all methods in addition. We conducted a further expert study proving that the results achieved with a video see-through head-mounted display showed valid for optical see-through head-mounted display calibration, too.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The calibration of optical see-through head-mounted displays is an important fundament for correct object alignment in augmented reality. Any calibration process for OSTHMDs requires users to align 2D points in screen space with 3D points in the real world and to confirm each alignment. In this poster, we present the results of our empiric evaluation where we compared four confirmation methods: Keyboard, Hand-held, Voice, and Waiting. The Waiting method, designed to reduce head motion during confirmation, showed a significantly higher accuracy than all other methods. Averaging over a time frame for sampling user input before the time of confirmation improved the accuracy of all methods in addition. We conducted a further expert study proving that the results achieved with a video see-through head-mounted display showed valid for optical see-through head-mounted display calibration, too.",
"fno": "06162910",
"keywords": [
"Calibration",
"Keyboards",
"Three Dimensional Displays",
"Accuracy",
"Augmented Reality",
"Electronic Mail",
"Adaptive Optics"
],
"authors": [
{
"affiliation": "Fachgebiet Augmented Reality (FAR) Technische Universität München, Fakultät für Informatik Garching b. München, Germany",
"fullName": "Patrick Maier",
"givenName": "Patrick",
"surname": "Maier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Magic Vision Lab, University of South Australia, School of Computer and Information Science, Adelaide, Australia",
"fullName": "Arindam Dey",
"givenName": "Arindam",
"surname": "Dey",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fachgebiet Augmented Reality (FAR) Technische Universität München, Fakultät für Informatik Garching b. München, Germany",
"fullName": "Christian A. L. Waechter",
"givenName": "Christian A. L.",
"surname": "Waechter",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Magic Vision Lab, University of South Australia, School of Computer and Information Science, Adelaide, Australia",
"fullName": "Christian Sandor",
"givenName": "Christian",
"surname": "Sandor",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fachgebiet Augmented Reality (FAR) Technische Universität München, Fakultät für Informatik Garching b. München, Germany",
"fullName": "Marcus Tönnis",
"givenName": "Marcus",
"surname": "Tönnis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fachgebiet Augmented Reality (FAR) Technische Universität München, Fakultät für Informatik Garching b. München, Germany",
"fullName": "Gudrun Klinker",
"givenName": "Gudrun",
"surname": "Klinker",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-10-01T00:00:00",
"pubType": "proceedings",
"pages": "267-268",
"year": "2011",
"issn": null,
"isbn": "978-1-4577-2183-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06162909",
"articleId": "12OmNAIdBPt",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06162911",
"articleId": "12OmNCxbXIR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948513",
"title": "Google glass, The META and Co. How to calibrate optical see-through head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948513/12OmNB8TUim",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504755",
"title": "Spatial consistency perception in optical and video see-through head-mounted augmentations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504755/12OmNqNXEli",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223450",
"title": "Evaluating optical see-through head-mounted display calibration via frustum visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223450/12OmNrAv3Ap",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460047",
"title": "Evaluation of user-centric optical see-through head-mounted display calibration using a leap motion controller",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460047/12OmNrJRPdz",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836486",
"title": "Modeling Physical Structure as Additional Constraints for Stereoscopic Optical See-Through Head-Mounted Display Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836486/12OmNx7XH8d",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwar/1999/0359/0/03590075",
"title": "A Method for Calibrating See-Through Head-Mounted Displays for AR",
"doi": null,
"abstractUrl": "/proceedings-article/iwar/1999/03590075/12OmNxTVU20",
"parentPublication": {
"id": "proceedings/iwar/1999/0359/0",
"title": "Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892255",
"title": "Robust optical see-through head-mounted display calibration: Taking anisotropic nature of user interaction errors into account",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892255/12OmNxvO04e",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802089",
"title": "Quantification of error from system and environmental sources in Optical See-Through head mounted display calibration methods",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802089/12OmNxwncbX",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836487",
"title": "Reduction of Interaction Space in Single Point Active Alignment Method for Optical See-Through Head-Mounted Display Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836487/12OmNyRg4AG",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064856",
"title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gyslg8NM8o",
"doi": "10.1109/ISMAR-Adjunct.2019.00050",
"title": "Computational Glasses: Vision Augmentations Using Computational Near-Eye Optics and Displays",
"normalizedTitle": "Computational Glasses: Vision Augmentations Using Computational Near-Eye Optics and Displays",
"abstract": "Wearable computing devices are small enough that they can be worn on the body and are a constant companion to the user. While many wearable devices have been associated with monitoring health or managing diseases, head-mounted displays are traditionally linked to Augmented and Virtual Reality, and generally overlay 3D information that supports professionals or for edutainment. This is surprising as prescription glasses, their traditional siblings, are widely accepted as a standard device for managing focusing errors of the human eye. In this work, we want to make the case for Computational Glasses that utilise technologies from optical see-through head-mounted displays or computational optics to compensate visual impairments. We will introduce some of the seminal works in the field as well as introduce our own work in the field. We will also include some of the challenges for doing research on Computational Glasses as well as give an outlook for future developments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Wearable computing devices are small enough that they can be worn on the body and are a constant companion to the user. While many wearable devices have been associated with monitoring health or managing diseases, head-mounted displays are traditionally linked to Augmented and Virtual Reality, and generally overlay 3D information that supports professionals or for edutainment. This is surprising as prescription glasses, their traditional siblings, are widely accepted as a standard device for managing focusing errors of the human eye. In this work, we want to make the case for Computational Glasses that utilise technologies from optical see-through head-mounted displays or computational optics to compensate visual impairments. We will introduce some of the seminal works in the field as well as introduce our own work in the field. We will also include some of the challenges for doing research on Computational Glasses as well as give an outlook for future developments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Wearable computing devices are small enough that they can be worn on the body and are a constant companion to the user. While many wearable devices have been associated with monitoring health or managing diseases, head-mounted displays are traditionally linked to Augmented and Virtual Reality, and generally overlay 3D information that supports professionals or for edutainment. This is surprising as prescription glasses, their traditional siblings, are widely accepted as a standard device for managing focusing errors of the human eye. In this work, we want to make the case for Computational Glasses that utilise technologies from optical see-through head-mounted displays or computational optics to compensate visual impairments. We will introduce some of the seminal works in the field as well as introduce our own work in the field. We will also include some of the challenges for doing research on Computational Glasses as well as give an outlook for future developments.",
"fno": "476500a438",
"keywords": [
"Augmented Reality",
"Handicapped Aids",
"Helmet Mounted Displays",
"Wearable Computers",
"Computational Glasses",
"Vision Augmentations",
"Wearable Computing Devices",
"Health Monitoring",
"Optical See Through Head Mounted Displays",
"Prescription Glasses",
"Standard Device",
"Human Eye",
"Focusing Error Management",
"Computational Near Eye Optics",
"Disease Management",
"Visual Impairment Compensation",
"Virtual Reality",
"Augmented Reality",
"Overlay 3 D Information",
"Edutainment",
"Glass",
"Prototypes",
"Cameras",
"Adaptive Optics",
"Visualization",
"Image Color Analysis",
"Computational Glasses",
"Augmented Human",
"OSTHMD",
"Near Eye Optics",
"Near Eye Display",
"Vision Aid",
"Vision Augmentation",
"Head Mounted Displays"
],
"authors": [
{
"affiliation": "University of Otago",
"fullName": "Jonathan Sutton",
"givenName": "Jonathan",
"surname": "Sutton",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Otago",
"fullName": "Tobias Langlotz",
"givenName": "Tobias",
"surname": "Langlotz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology",
"fullName": "Yuta Itoh",
"givenName": "Yuta",
"surname": "Itoh",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "438-442",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a435",
"articleId": "1gysjLCZmKI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a443",
"articleId": "1gyslwNaSd2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948446",
"title": "[Poster] Non-parametric camera-based calibration of optical see-through glasses for augmented reality applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948446/12OmNASILJd",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948513",
"title": "Google glass, The META and Co. How to calibrate optical see-through head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948513/12OmNB8TUim",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504733",
"title": "A low-cost, low-latency approach to dynamic immersion in occlusive head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504733/12OmNBEGYLT",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-iscmht/2017/1023/0/08338579",
"title": "Blue glasses increase your alertness: Effects of colored glasses on psychomotor performance",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-iscmht/2017/08338579/12OmNxRF75p",
"parentPublication": {
"id": "proceedings/iciev-iscmht/2017/1023/0",
"title": "2017 6th International Conference on Informatics, Electronics and Vision & 2017 7th International Symposium in Computational Medical and Health Technology (ICIEV-ISCMHT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2016/8942/0/8942a390",
"title": "HMD-enabled Virtual Screens as Alternatives to Large Physical Displays",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a390/12OmNxwENHM",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispan-fcst-iscc/2017/0840/0/0840a119",
"title": "Computer-Assisted Billiard Self-Training Using Intelligent Glasses",
"doi": null,
"abstractUrl": "/proceedings-article/ispan-fcst-iscc/2017/0840a119/12OmNyo1nWq",
"parentPublication": {
"id": "proceedings/ispan-fcst-iscc/2017/0840/0",
"title": "2017 14th International Symposium on Pervasive Systems, Algorithms and Networks & 2017 11th International Conference on Frontier of Computer Science and Technology & 2017 Third International Symposium of Creative Computing (ISPAN-FCST-ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523376",
"title": "Real-Time Radiometric Compensation for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523376/13rRUxASu0P",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2011/01/mco2011010017",
"title": "3D Displays without Glasses: Coming to a Screen near You",
"doi": null,
"abstractUrl": "/magazine/co/2011/01/mco2011010017/13rRUxBa5iD",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2022/9548/0/954800a246",
"title": "Comparison of Virtual-Real Integration Efficiency between Light Field and Conventional Near-Eye AR Displays",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2022/954800a246/1GvditqC14Q",
"parentPublication": {
"id": "proceedings/mipr/2022/9548/0",
"title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a581",
"title": "HoloBeam: Paper-Thin Near-Eye Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a581/1MNgR9rZSCc",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNviZlhM",
"title": "2017 9th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"acronym": "icmtma",
"groupId": "1002837",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAWYKGO",
"doi": "10.1109/ICMTMA.2017.0043",
"title": "A Method Study on the Target Value of Prevention of Truck Rear Under-Run Protection Device",
"normalizedTitle": "A Method Study on the Target Value of Prevention of Truck Rear Under-Run Protection Device",
"abstract": "Rear-end crash is one of the important types of road traffic accidents with the characteristics of heavy casualties. Among the accidents, it has caused greater casualties and higher mortality in the passenger car crash into truck accidents. According to the analysis of lots of traffic accidents, it's found that the truck rear under-run protection device has good protective effect and can reduce casualties in this kind of accidents. Only the analysis based on actual accidents data is able to provide real foundation for improving the rear under-run protection device. So this paper will, on the basis of actual accidents data, study on the determination method of the target value of prevention of truck rear under-run protection device. In order to carry out research on the basis of actual traffic accidents data, a large number of traffic accident cases has been collected and in-depth accident investigation has been conducted. In order to study a method on the target value of prevention of truck rear under-run protection device, some accidents has been appropriately chosen as data samples of road traffic accidents. The cumulative probability distribution was introduced into the study. Through analysis and deduction, the cumulative probability function of impact force has been established, and the characteristic point matrix of impact force has been determined, and the determination method of the target value of prevention has been finally found out. The function concluded by this method will introduce the weight of truck and the target probability of prevention so as to obtain the target value of prevention. The method can provide support for the optimization of rear under-run protection device. This paper further focuses on method exploration of the automotive safety technology as well as the vehicle standard key parameters on the basis of actual traffic accidents data, so it can also provide reference for other key parameters to be studied.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Rear-end crash is one of the important types of road traffic accidents with the characteristics of heavy casualties. Among the accidents, it has caused greater casualties and higher mortality in the passenger car crash into truck accidents. According to the analysis of lots of traffic accidents, it's found that the truck rear under-run protection device has good protective effect and can reduce casualties in this kind of accidents. Only the analysis based on actual accidents data is able to provide real foundation for improving the rear under-run protection device. So this paper will, on the basis of actual accidents data, study on the determination method of the target value of prevention of truck rear under-run protection device. In order to carry out research on the basis of actual traffic accidents data, a large number of traffic accident cases has been collected and in-depth accident investigation has been conducted. In order to study a method on the target value of prevention of truck rear under-run protection device, some accidents has been appropriately chosen as data samples of road traffic accidents. The cumulative probability distribution was introduced into the study. Through analysis and deduction, the cumulative probability function of impact force has been established, and the characteristic point matrix of impact force has been determined, and the determination method of the target value of prevention has been finally found out. The function concluded by this method will introduce the weight of truck and the target probability of prevention so as to obtain the target value of prevention. The method can provide support for the optimization of rear under-run protection device. This paper further focuses on method exploration of the automotive safety technology as well as the vehicle standard key parameters on the basis of actual traffic accidents data, so it can also provide reference for other key parameters to be studied.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Rear-end crash is one of the important types of road traffic accidents with the characteristics of heavy casualties. Among the accidents, it has caused greater casualties and higher mortality in the passenger car crash into truck accidents. According to the analysis of lots of traffic accidents, it's found that the truck rear under-run protection device has good protective effect and can reduce casualties in this kind of accidents. Only the analysis based on actual accidents data is able to provide real foundation for improving the rear under-run protection device. So this paper will, on the basis of actual accidents data, study on the determination method of the target value of prevention of truck rear under-run protection device. In order to carry out research on the basis of actual traffic accidents data, a large number of traffic accident cases has been collected and in-depth accident investigation has been conducted. In order to study a method on the target value of prevention of truck rear under-run protection device, some accidents has been appropriately chosen as data samples of road traffic accidents. The cumulative probability distribution was introduced into the study. Through analysis and deduction, the cumulative probability function of impact force has been established, and the characteristic point matrix of impact force has been determined, and the determination method of the target value of prevention has been finally found out. The function concluded by this method will introduce the weight of truck and the target probability of prevention so as to obtain the target value of prevention. The method can provide support for the optimization of rear under-run protection device. This paper further focuses on method exploration of the automotive safety technology as well as the vehicle standard key parameters on the basis of actual traffic accidents data, so it can also provide reference for other key parameters to be studied.",
"fno": "07832206",
"keywords": [
"Probability",
"Protection",
"Road Accidents",
"Road Safety",
"Truck Rear Under Run Protection Device",
"Rear End Crash",
"Road Traffic Accidents",
"Passenger Car Crash",
"Truck Accidents",
"Under Run Protection Device",
"In Depth Accident",
"Cumulative Probability Distribution",
"Cumulative Probability Function",
"Impact Force",
"Automotive Safety Technology",
"Vehicle Standard",
"Automobiles",
"Accidents",
"Force",
"Vehicle Crash Testing",
"Frequency Modulation",
"Truck",
"Passenger Car",
"Rear Under Run Protection Device",
"Prevention",
"Target Value"
],
"authors": [
{
"affiliation": null,
"fullName": "Sun Zhendong",
"givenName": "Sun",
"surname": "Zhendong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yan Lei",
"givenName": "Yan",
"surname": "Lei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lou Lei",
"givenName": "Lou",
"surname": "Lei",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmtma",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "144-147",
"year": "2017",
"issn": "2157-1481",
"isbn": "978-1-5090-4868-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07832205",
"articleId": "12OmNyQYt58",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07832207",
"articleId": "12OmNC1Gua3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icoip/2010/4252/1/4252a498",
"title": "Estimation of Rear-ends Accidents Caused by the Delay of Controlling Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/icoip/2010/4252a498/12OmNApLGw0",
"parentPublication": {
"id": "proceedings/icoip/2010/4252/2",
"title": "Optoelectronics and Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2012/4608/0/4608a728",
"title": "Research on Intelligent Anti-vehicle Rear-Ends Collision System",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2012/4608a728/12OmNC2OSJs",
"parentPublication": {
"id": "proceedings/isdea/2012/4608/0",
"title": "2012 Second International Conference on Intelligent System Design and Engineering Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2012/4772/0/4772a671",
"title": "Research on Automotive Rear-End Collision Warning Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2012/4772a671/12OmNxaNGqi",
"parentPublication": {
"id": "proceedings/icdma/2012/4772/0",
"title": "2012 Third International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2017/4868/0/07832227",
"title": "Study on the Driver's Lower Limb Injuries in Rear-End Truck Accident",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2017/07832227/12OmNxecS64",
"parentPublication": {
"id": "proceedings/icmtma/2017/4868/0",
"title": "2017 9th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2009/3583/3/3583c475",
"title": "Analysis and Evaluation on the Rear-End Crash of the Car with a Dummy",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583c475/12OmNxveNHO",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2016/2312/0/2312a889",
"title": "Vehicle-to-Vehicle Rear Crashes in China - A Study of Accident Characteristics to Provide Input to Active Safety System Design",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a889/12OmNyRxFDE",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2022/7532/0/753200a151",
"title": "Bicycle-based collision prevention system using pedestrian trajectory prediction",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2022/753200a151/1LAyXjHEvMk",
"parentPublication": {
"id": "proceedings/candarw/2022/7532/0",
"title": "2022 Tenth International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2019/1764/0/176400a031",
"title": "AC3R: Automatically Reconstructing Car Crashes from Police Reports",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2019/176400a031/1cJ7n8xSxMY",
"parentPublication": {
"id": "proceedings/icse-companion/2019/1764/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2019/1764/0/176400a290",
"title": "Automatically Reconstructing Car Crashes from Police Reports for Testing Self-Driving Cars",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2019/176400a290/1cJ7nMlYJLW",
"parentPublication": {
"id": "proceedings/icse-companion/2019/1764/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icuems/2020/8832/0/09151735",
"title": "Truck Drivers with Cardiovascular Disease and Their Risk of Accidents",
"doi": null,
"abstractUrl": "/proceedings-article/icuems/2020/09151735/1lRlSabhjZ6",
"parentPublication": {
"id": "proceedings/icuems/2020/8832/0",
"title": "2020 International Conference on Urban Engineering and Management Science (ICUEMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBV9Icq",
"title": "2018 IEEE International Conference on Automation, Quality and Testing, Robotics (AQTR)",
"acronym": "aqtr",
"groupId": "1001746",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyOHG0t",
"doi": "10.1109/AQTR.2018.8402776",
"title": "Electric cars — Challenges and trends",
"normalizedTitle": "Electric cars — Challenges and trends",
"abstract": "Electric mobility and particularly electric cars are seeing a comeback. Several mass manufactured models have even already seen updates. Established and newcomer car manufacturers are scrambling to bring new models to market and considerable research resources are invested to solve the problems. This paper looks at the current challenges and probable future trends regarding the cars, their batteries and the charging infrastructure. Global, European and regional conditions for electric car adoption are also discussed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Electric mobility and particularly electric cars are seeing a comeback. Several mass manufactured models have even already seen updates. Established and newcomer car manufacturers are scrambling to bring new models to market and considerable research resources are invested to solve the problems. This paper looks at the current challenges and probable future trends regarding the cars, their batteries and the charging infrastructure. Global, European and regional conditions for electric car adoption are also discussed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Electric mobility and particularly electric cars are seeing a comeback. Several mass manufactured models have even already seen updates. Established and newcomer car manufacturers are scrambling to bring new models to market and considerable research resources are invested to solve the problems. This paper looks at the current challenges and probable future trends regarding the cars, their batteries and the charging infrastructure. Global, European and regional conditions for electric car adoption are also discussed.",
"fno": "08402776",
"keywords": [
"Automobiles",
"Batteries",
"Europe",
"Hydrogen",
"Internal Combustion Engines",
"Petroleum",
"Electric Car",
"Charging Station",
"EVSE",
"Electric Car Battery",
"IEC 62196",
"CCS"
],
"authors": [
{
"affiliation": "Technical University of Cluj-Napoca, Automation Department, Cluj-Napoca, Romania",
"fullName": "Szilárd Enyedi",
"givenName": "Szilárd",
"surname": "Enyedi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aqtr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-05-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-2205-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08402775",
"articleId": "12OmNqBtj8k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08402777",
"articleId": "12OmNzBOihB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdi3c/2018/7523/0/752301a088",
"title": "Exact Rate Control of Electric Vehicle Using ARM with Battery Saving Mode",
"doi": null,
"abstractUrl": "/proceedings-article/icdi3c/2018/752301a088/12OmNxWcH6W",
"parentPublication": {
"id": "proceedings/icdi3c/2018/7523/0",
"title": "2018 International Conference on Design Innovations for 3Cs Compute Communicate Control (ICDI3C)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isms/2013/4963/0/4963a394",
"title": "Alternative Energy Efficient System for Charging Electric Cars",
"doi": null,
"abstractUrl": "/proceedings-article/isms/2013/4963a394/12OmNxZkhuX",
"parentPublication": {
"id": "proceedings/isms/2013/4963/0",
"title": "Intelligent Systems, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartcomp/2018/4705/0/470501a171",
"title": "Free Floating Electric Car Sharing in Smart Cities: Data Driven System Dimensioning",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp/2018/470501a171/12OmNxdm4EL",
"parentPublication": {
"id": "proceedings/smartcomp/2018/4705/0",
"title": "2018 IEEE International Conference on Smart Computing (SMARTCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2018/06/08617744",
"title": "Autonomous Cars: Social and Economic Implications",
"doi": null,
"abstractUrl": "/magazine/it/2018/06/08617744/17D45X2fUEX",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2022/9978/0/997800a691",
"title": "Study on private cars’ charging location selection behavior based on face-to-face investigation",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2022/997800a691/1ByemudcRB6",
"parentPublication": {
"id": "proceedings/icmtma/2022/9978/0",
"title": "2022 14th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeae/2021/9540/0/954000a125",
"title": "Energy Impact and Aerodynamic Analysis of LiDARs in Autonomous Electric Cars",
"doi": null,
"abstractUrl": "/proceedings-article/icmeae/2021/954000a125/1GZjCYg8WIM",
"parentPublication": {
"id": "proceedings/icmeae/2021/9540/0",
"title": "2021 International Conference on Mechatronics, Electronics and Automotive Engineering (ICMEAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2019/1764/0/176400a290",
"title": "Automatically Reconstructing Car Crashes from Police Reports for Testing Self-Driving Cars",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2019/176400a290/1cJ7nMlYJLW",
"parentPublication": {
"id": "proceedings/icse-companion/2019/1764/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2019/06/08896138",
"title": "Autonomous Cars: Challenges and Opportunities",
"doi": null,
"abstractUrl": "/magazine/it/2019/06/08896138/1eS9UiyWxkk",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/conisoft/2020/8450/0/845000a174",
"title": "Blockchain Software System Proposal Applied to Electric Self-driving Cars Charging Stations: A TSP Academic Project",
"doi": null,
"abstractUrl": "/proceedings-article/conisoft/2020/845000a174/1q0FQeeGFNK",
"parentPublication": {
"id": "proceedings/conisoft/2020/8450/0",
"title": "2020 8th International Conference in Software Engineering Research and Innovation (CONISOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/euros&pw/2021/1012/0/999900a136",
"title": "Privacy and modern cars through a dual lens",
"doi": null,
"abstractUrl": "/proceedings-article/euros&pw/2021/999900a136/1y63mHqHfR6",
"parentPublication": {
"id": "proceedings/euros&pw/2021/1012/0",
"title": "2021 IEEE European Symposium on Security and Privacy Workshops (EuroS&PW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvlxJwN",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"acronym": "icmtma",
"groupId": "1002837",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyRxFDE",
"doi": "10.1109/ICMTMA.2016.214",
"title": "Vehicle-to-Vehicle Rear Crashes in China - A Study of Accident Characteristics to Provide Input to Active Safety System Design",
"normalizedTitle": "Vehicle-to-Vehicle Rear Crashes in China - A Study of Accident Characteristics to Provide Input to Active Safety System Design",
"abstract": "To develop an AEB assessment method working well in Chinese real world traffic situation, it is necessary to under-stand the detailed accident characteristics about ve-hicle-to-vehicle rear end crashes based on Chinese real world accident data. Therefore, the objective of this study was to understand the vehicle-to-vehicle rear end accident characteristics in detail, focusing on parameters important for design of active safety systems. Two current Chinese real world accident data-bases were queried for vehicle-to-vehicle rear end crashes. In total, 102 cases were selected for further detailed study regarding accident scene, lighting conditions (daytime and night), target vehicle status before the crash, and crash offset. In Chinese real world rear end accidents with passenger car as rear (striking) vehicle, passenger cars and trucks are most common collision opponents. The car-to-truck accident is the most severe rear end accident type, since under-ride occurs frequently. To protect more occupants in passenger cars in rear end accidents, more attention should be paid on the car-to-truck rear end accidents. A possible effective countermeasure is an AEB system able to detect both cars and trucks, trucks preferably also in night conditions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To develop an AEB assessment method working well in Chinese real world traffic situation, it is necessary to under-stand the detailed accident characteristics about ve-hicle-to-vehicle rear end crashes based on Chinese real world accident data. Therefore, the objective of this study was to understand the vehicle-to-vehicle rear end accident characteristics in detail, focusing on parameters important for design of active safety systems. Two current Chinese real world accident data-bases were queried for vehicle-to-vehicle rear end crashes. In total, 102 cases were selected for further detailed study regarding accident scene, lighting conditions (daytime and night), target vehicle status before the crash, and crash offset. In Chinese real world rear end accidents with passenger car as rear (striking) vehicle, passenger cars and trucks are most common collision opponents. The car-to-truck accident is the most severe rear end accident type, since under-ride occurs frequently. To protect more occupants in passenger cars in rear end accidents, more attention should be paid on the car-to-truck rear end accidents. A possible effective countermeasure is an AEB system able to detect both cars and trucks, trucks preferably also in night conditions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To develop an AEB assessment method working well in Chinese real world traffic situation, it is necessary to under-stand the detailed accident characteristics about ve-hicle-to-vehicle rear end crashes based on Chinese real world accident data. Therefore, the objective of this study was to understand the vehicle-to-vehicle rear end accident characteristics in detail, focusing on parameters important for design of active safety systems. Two current Chinese real world accident data-bases were queried for vehicle-to-vehicle rear end crashes. In total, 102 cases were selected for further detailed study regarding accident scene, lighting conditions (daytime and night), target vehicle status before the crash, and crash offset. In Chinese real world rear end accidents with passenger car as rear (striking) vehicle, passenger cars and trucks are most common collision opponents. The car-to-truck accident is the most severe rear end accident type, since under-ride occurs frequently. To protect more occupants in passenger cars in rear end accidents, more attention should be paid on the car-to-truck rear end accidents. A possible effective countermeasure is an AEB system able to detect both cars and trucks, trucks preferably also in night conditions.",
"fno": "2312a889",
"keywords": [
"Accidents",
"Roads",
"Injuries",
"Safety",
"Vehicle Crash Testing",
"Automobiles",
"Car To Truck Under Ride",
"AEB System Design",
"Passenger Car",
"Rear End Crashes"
],
"authors": [
{
"affiliation": null,
"fullName": "Bo Sui",
"givenName": "Bo",
"surname": "Sui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chengkai Ding",
"givenName": "Chengkai",
"surname": "Ding",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rikard Fredriksson",
"givenName": "Rikard",
"surname": "Fredriksson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shengqi Zhou",
"givenName": "Shengqi",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaohua Zhao",
"givenName": "Xiaohua",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmtma",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "889-896",
"year": "2016",
"issn": "2157-1481",
"isbn": "978-1-5090-2312-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2312a885",
"articleId": "12OmNyYm2tm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2312a897",
"articleId": "12OmNyrIaH2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2017/4868/0/07832206",
"title": "A Method Study on the Target Value of Prevention of Truck Rear Under-Run Protection Device",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2017/07832206/12OmNAWYKGO",
"parentPublication": {
"id": "proceedings/icmtma/2017/4868/0",
"title": "2017 9th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoip/2010/4252/1/4252a498",
"title": "Estimation of Rear-ends Accidents Caused by the Delay of Controlling Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/icoip/2010/4252a498/12OmNApLGw0",
"parentPublication": {
"id": "proceedings/icoip/2010/4252/2",
"title": "Optoelectronics and Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2013/4893/0/06454759",
"title": "A New Vehicle Safety Space Model Based on Driving Intention",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2013/06454759/12OmNBgQFJ1",
"parentPublication": {
"id": "proceedings/isdea/2013/4893/0",
"title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2012/4608/0/4608a728",
"title": "Research on Intelligent Anti-vehicle Rear-Ends Collision System",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2012/4608a728/12OmNC2OSJs",
"parentPublication": {
"id": "proceedings/isdea/2012/4608/0",
"title": "2012 Second International Conference on Intelligent System Design and Engineering Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2016/2312/0/2312a768",
"title": "The Effects of Vehicle Front Design Variables and Impact Speed on Lower Extremity Injury in Pedestrian Collisions Using In-Depth Accident Data",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a768/12OmNxIRxTi",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2017/4868/0/07832227",
"title": "Study on the Driver's Lower Limb Injuries in Rear-End Truck Accident",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2017/07832227/12OmNxecS64",
"parentPublication": {
"id": "proceedings/icmtma/2017/4868/0",
"title": "2017 9th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isads/2013/5069/0/06513430",
"title": "A new type of automatic alarming device to rescue accident injured in time",
"doi": null,
"abstractUrl": "/proceedings-article/isads/2013/06513430/12OmNyRg4uH",
"parentPublication": {
"id": "proceedings/isads/2013/5069/0",
"title": "2013 IEEE Eleventh International Symposium on Autonomous Decentralized Systems (ISADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2012/4772/0/4772a230",
"title": "Analysis of Vehicle Crash Compatibility",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2012/4772a230/12OmNyugyHM",
"parentPublication": {
"id": "proceedings/icdma/2012/4772/0",
"title": "2012 Third International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdataservice/2020/7022/0/702200a017",
"title": "An Ensemble of Multiple Boosting Methods Based on Classifier-Specific Soft Voting for Intelligent Vehicle Crash Injury Severity Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/bigdataservice/2020/702200a017/1mF8Af8GVIA",
"parentPublication": {
"id": "proceedings/bigdataservice/2020/7022/0",
"title": "2020 IEEE Sixth International Conference on Big Data Computing Service and Applications (BigDataService)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2020/7397/0/739700a836",
"title": "Identification of Nighttime Rear Vehicle Images",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2020/739700a836/1tGcsJcaZGg",
"parentPublication": {
"id": "proceedings/iiai-aai/2020/7397/0",
"title": "2020 9th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKisy",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45XvMcdq",
"doi": "10.1109/ISMAR.2018.00032",
"title": "Ensuring Safety in Augmented Reality from Trade-off Between Immersion and Situation Awareness",
"normalizedTitle": "Ensuring Safety in Augmented Reality from Trade-off Between Immersion and Situation Awareness",
"abstract": "Although the mobility and emerging technology of augmented reality (AR) have brought significant entertainment and convenience in everyday life, the use of AR is becoming a social problem as the accidents caused by a shortage of situation awareness due to an immersion of AR are increasing. In this paper, we address the trade-off between immersion and situation awareness as the fundamental factor of the AR-related accidents. As a solution against the trade-off, we propose a third-party component that prevents pedestrian-vehicle accidents in a traffic environment based on vehicle position estimation (VPE) and vehicle position visualization (VPV). From a RGB image sequence, VPE efficiently estimates the relative 3D position between a user and a car using generated convolutional neural network (CNN) model with a region-of-interest based scheme. VPV shows the estimated car position as a dot using an out-of-view object visualization method to alert the user from possible collisions. The VPE experiment with 16 combinations of parameters showed that the InceptionV3 model, fine-tuned on activated images yields the best performance with a root mean squared error of 0.34 m in 2.1 ms. The user study of VPV showed the inversely proportional relationship between the immersion controlled by the difficulty of the AR game and the frequency of situation awareness in both quantitatively and qualitatively. Additional VPV experiment assessing two out-of-view object visualization methods (EyeSee360 and Radar) showed no significant effect on the participants' activity, while EyeSee360 yielded faster responses and Radar engendered participants' preference on average. Our field study demonstrated an integration of VPE and VPV which has potentials for safety-ensured immersion when the proposed component is used for AR in daily uses. 
We expect that when the proposed component is developed enough to be used in real world, it will contribute to the safety-ensured AR, as well as to the population of AR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although the mobility and emerging technology of augmented reality (AR) have brought significant entertainment and convenience in everyday life, the use of AR is becoming a social problem as the accidents caused by a shortage of situation awareness due to an immersion of AR are increasing. In this paper, we address the trade-off between immersion and situation awareness as the fundamental factor of the AR-related accidents. As a solution against the trade-off, we propose a third-party component that prevents pedestrian-vehicle accidents in a traffic environment based on vehicle position estimation (VPE) and vehicle position visualization (VPV). From a RGB image sequence, VPE efficiently estimates the relative 3D position between a user and a car using generated convolutional neural network (CNN) model with a region-of-interest based scheme. VPV shows the estimated car position as a dot using an out-of-view object visualization method to alert the user from possible collisions. The VPE experiment with 16 combinations of parameters showed that the InceptionV3 model, fine-tuned on activated images yields the best performance with a root mean squared error of 0.34 m in 2.1 ms. The user study of VPV showed the inversely proportional relationship between the immersion controlled by the difficulty of the AR game and the frequency of situation awareness in both quantitatively and qualitatively. Additional VPV experiment assessing two out-of-view object visualization methods (EyeSee360 and Radar) showed no significant effect on the participants' activity, while EyeSee360 yielded faster responses and Radar engendered participants' preference on average. Our field study demonstrated an integration of VPE and VPV which has potentials for safety-ensured immersion when the proposed component is used for AR in daily uses. 
We expect that when the proposed component is developed enough to be used in real world, it will contribute to the safety-ensured AR, as well as to the population of AR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although the mobility and emerging technology of augmented reality (AR) have brought significant entertainment and convenience in everyday life, the use of AR is becoming a social problem as the accidents caused by a shortage of situation awareness due to an immersion of AR are increasing. In this paper, we address the trade-off between immersion and situation awareness as the fundamental factor of the AR-related accidents. As a solution against the trade-off, we propose a third-party component that prevents pedestrian-vehicle accidents in a traffic environment based on vehicle position estimation (VPE) and vehicle position visualization (VPV). From a RGB image sequence, VPE efficiently estimates the relative 3D position between a user and a car using generated convolutional neural network (CNN) model with a region-of-interest based scheme. VPV shows the estimated car position as a dot using an out-of-view object visualization method to alert the user from possible collisions. The VPE experiment with 16 combinations of parameters showed that the InceptionV3 model, fine-tuned on activated images yields the best performance with a root mean squared error of 0.34 m in 2.1 ms. The user study of VPV showed the inversely proportional relationship between the immersion controlled by the difficulty of the AR game and the frequency of situation awareness in both quantitatively and qualitatively. Additional VPV experiment assessing two out-of-view object visualization methods (EyeSee360 and Radar) showed no significant effect on the participants' activity, while EyeSee360 yielded faster responses and Radar engendered participants' preference on average. Our field study demonstrated an integration of VPE and VPV which has potentials for safety-ensured immersion when the proposed component is used for AR in daily uses. 
We expect that when the proposed component is developed enough to be used in real world, it will contribute to the safety-ensured AR, as well as to the population of AR.",
"fno": "745900a070",
"keywords": [
"Augmented Reality",
"Convolutional Neural Nets",
"Data Visualisation",
"Driver Information Systems",
"Image Colour Analysis",
"Image Sequences",
"Pedestrians",
"Road Accidents",
"Road Safety",
"Road Traffic",
"Traffic Engineering Computing",
"Inception V 3 Model",
"VPV Experiment",
"Out Of View Object Visualization Methods",
"Pedestrian Vehicle Accidents",
"Third Party Component",
"AR Related Accidents",
"Situation Awareness",
"Social Problem",
"Augmented Reality",
"Safety Ensured AR",
"Safety Ensured Immersion",
"Eye See 360",
"Activated Images",
"VPE Experiment",
"Out Of View Object Visualization Method",
"Estimated Car Position",
"Region Of Interest Based Scheme",
"Convolutional Neural Network Model",
"Relative 3 D Position",
"RGB Image Sequence",
"Vehicle Position Visualization",
"Estimation",
"Accidents",
"Visualization",
"Three Dimensional Displays",
"Safety",
"Automobiles",
"Two Dimensional Displays",
"Augmented Reality",
"Safety",
"Warning System",
"Evaluation",
"User Study"
],
"authors": [
{
"affiliation": null,
"fullName": "Jinki Jung",
"givenName": "Jinki",
"surname": "Jung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hyeopwoo Lee",
"givenName": "Hyeopwoo",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jeehye Choi",
"givenName": "Jeehye",
"surname": "Choi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Abhilasha Nanda",
"givenName": "Abhilasha",
"surname": "Nanda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Uwe Gruenefeld",
"givenName": "Uwe",
"surname": "Gruenefeld",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tim Stratmann",
"givenName": "Tim",
"surname": "Stratmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wilko Heuten",
"givenName": "Wilko",
"surname": "Heuten",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "70-79",
"year": "2018",
"issn": "1554-7868",
"isbn": "978-1-5386-7459-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "745900a057",
"articleId": "17D45XwUALN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "745900a080",
"articleId": "17D45W1Oa1E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgames/2013/0820/0/06632613",
"title": "Immersion and realism in video games - The confused moniker of video game engrossment",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2013/06632613/12OmNqGA50i",
"parentPublication": {
"id": "proceedings/cgames/2013/0820/0",
"title": "2013 18th International Conference on Computer Games: AI, Animation, Mobile, Interactive Multimedia, Educational & Serious Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2017/3588/0/3588a116",
"title": "Interactivity and Immersion Evaluation on Smartphones",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2017/3588a116/12OmNrYCXWN",
"parentPublication": {
"id": "proceedings/svr/2017/3588/0",
"title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2016/8942/0/8942a300",
"title": "PoI Awareness, Relevance and Aggregation for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a300/12OmNwkhTe8",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hics/1996/7493/0/74930089",
"title": "A context-based approach to training situation awareness",
"doi": null,
"abstractUrl": "/proceedings-article/hics/1996/74930089/12OmNyFCw0J",
"parentPublication": {
"id": "proceedings/hics/1996/7493/0",
"title": "Human Interaction with Complex Systems, Annual Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948428",
"title": "Creating automatically aligned consensus realities for AR videoconferencing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948428/12OmNynsbys",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523388",
"title": "A Real-Time Augmented Reality System to See-Through Cars",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523388/13rRUxBJhvy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a541",
"title": "A study of the influence of AR on the perception, comprehension and projection levels of situation awareness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a541/1MNgMgQsPjW",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08836087",
"title": "The Impact of Immersion on Cluster Identification Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08836087/1dia1nodZeM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a096",
"title": "Stepping over Obstacles with Augmented Reality based on Visual Exproprioception",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a096/1pBMiFPYlkA",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a454",
"title": "Augmented Reality with Maps for Off-Screen POI Awareness",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a454/1rSR7Fgh4qc",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "19m3yLbYQdq",
"title": "2018 7th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"acronym": "iiai-aai",
"groupId": "1801921",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19m3FdzbOdq",
"doi": "10.1109/IIAI-AAI.2018.00174",
"title": "A Lightweight Augmented Reality System to See-Through Cars",
"normalizedTitle": "A Lightweight Augmented Reality System to See-Through Cars",
"abstract": "Internet of Vehicles (IoV) enables advanced driver assistance systems (ADAS) to acquire the sensing information of nearby vehicles for driving safety. One of the most dangerous driving maneuvers is overtaking, where the leading vehicle may occlude the view of the following vehicle's driver. To assist the driver, providing the capability of see-through car is a promising solution. ADAS with IoV can obtain the videos from the leading vehicle to compensate the occluded view. Therefore, we formulate an image synthesis problem and propose a lightweight solution. Compared to other works, we expect that our system can easily apply on a mobile device with a camera or a smartphone. For implementation, we utilize Unity with Vuforia to make pose estimation, rather than GPS or 3D reconstruction. We also validate the performance of our system by the prototyping result. It shows that the stitching performance is well and we can achieve this AR system with low costs at price and computation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Internet of Vehicles (IoV) enables advanced driver assistance systems (ADAS) to acquire the sensing information of nearby vehicles for driving safety. One of the most dangerous driving maneuvers is overtaking, where the leading vehicle may occlude the view of the following vehicle's driver. To assist the driver, providing the capability of see-through car is a promising solution. ADAS with IoV can obtain the videos from the leading vehicle to compensate the occluded view. Therefore, we formulate an image synthesis problem and propose a lightweight solution. Compared to other works, we expect that our system can easily apply on a mobile device with a camera or a smartphone. For implementation, we utilize Unity with Vuforia to make pose estimation, rather than GPS or 3D reconstruction. We also validate the performance of our system by the prototyping result. It shows that the stitching performance is well and we can achieve this AR system with low costs at price and computation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Internet of Vehicles (IoV) enables advanced driver assistance systems (ADAS) to acquire the sensing information of nearby vehicles for driving safety. One of the most dangerous driving maneuvers is overtaking, where the leading vehicle may occlude the view of the following vehicle's driver. To assist the driver, providing the capability of see-through car is a promising solution. ADAS with IoV can obtain the videos from the leading vehicle to compensate the occluded view. Therefore, we formulate an image synthesis problem and propose a lightweight solution. Compared to other works, we expect that our system can easily apply on a mobile device with a camera or a smartphone. For implementation, we utilize Unity with Vuforia to make pose estimation, rather than GPS or 3D reconstruction. We also validate the performance of our system by the prototyping result. It shows that the stitching performance is well and we can achieve this AR system with low costs at price and computation.",
"fno": "744701a855",
"keywords": [
"Augmented Reality",
"Automobiles",
"Cameras",
"Driver Information Systems",
"Mobile Computing",
"Pose Estimation",
"Road Safety",
"Video Signal Processing",
"Lightweight Augmented Reality System",
"Io V",
"Advanced Driver Assistance Systems",
"ADAS",
"Sensing Information",
"Nearby Vehicles",
"Driving Safety",
"Dangerous Driving Maneuvers",
"Occluded View",
"Image Synthesis Problem",
"Lightweight Solution",
"AR System",
"See Through Cars",
"Internet Of Vehicles",
"Mobile Device",
"Camera",
"Smartphone",
"Unity",
"Vuforia",
"Pose Estimation",
"GPS",
"3 D Reconstruction",
"Stitching Performance",
"Automobiles",
"Cameras",
"Image Synthesis",
"Three Dimensional Displays",
"Safety",
"Global Positioning System",
"Augmented Reality Image Synthesis See Through System Unity Vuforia Vehicle To Vehicle Communications"
],
"authors": [
{
"affiliation": "Coll. of Math. & Comput. Sci., Fuzhou Univ., Fuzhou, China",
"fullName": "Bingjie Yuan",
"givenName": "Bingjie",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci. & Eng., Yuan Ze Univ., Taoyuan, Taiwan",
"fullName": "Yan-Ann Chen",
"givenName": "Yan-Ann",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coll. of Math. & Comput. Sci., Fuzhou Univ., Fuzhou, China",
"fullName": "Shaozhen Ye",
"givenName": "Shaozhen",
"surname": "Ye",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iiai-aai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "855-860",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7447-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "744701a849",
"articleId": "19m3F3lD7Tq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "744701a861",
"articleId": "19m3I08s63e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2013/4932/0/4932a434",
"title": "Design Concept and Method of Advanced Driver Assistance Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2013/4932a434/12OmNBigFlh",
"parentPublication": {
"id": "proceedings/icmtma/2013/4932/0",
"title": "2013 Fifth International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d182",
"title": "Car that Knows Before You Do: Anticipating Maneuvers via Learning Temporal Driving Models",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d182/12OmNC2OSKe",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2013/2945/0/06671262",
"title": "User-centered perspectives for automotive augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2013/06671262/12OmNCmGNNS",
"parentPublication": {
"id": "proceedings/ismar-amh/2013/2945/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402592",
"title": "Generation of virtual display surfaces for in-vehicle contextual augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402592/12OmNwCJOPZ",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671791",
"title": "Augmented Reality driving supported by Vehicular Ad Hoc Networking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671791/12OmNyRg4vp",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issrew/2017/2387/0/2387a286",
"title": "Challenges in Certification of Autonomous Driving Systems",
"doi": null,
"abstractUrl": "/proceedings-article/issrew/2017/2387a286/12OmNzIl3Cu",
"parentPublication": {
"id": "proceedings/issrew/2017/2387/0",
"title": "2017 IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ibica/2011/4606/0/4606a316",
"title": "Design and Implement Augmented Reality for Supporting Driving Visual Guidance",
"doi": null,
"abstractUrl": "/proceedings-article/ibica/2011/4606a316/12OmNzahbSj",
"parentPublication": {
"id": "proceedings/ibica/2011/4606/0",
"title": "Innovations in Bio-inspired Computing and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523388",
"title": "A Real-Time Augmented Reality System to See-Through Cars",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523388/13rRUxBJhvy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546255",
"title": "Driving Maneuver Detection via Sequence Learning from Vehicle Signals and Video Images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546255/17D45WrVg6y",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699235",
"title": "Supporting Driver Situation Awareness for Autonomous Urban Driving with an Augmented-Reality Windshield Display",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699235/19F1ST8Mym4",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1Df7OJsGc48",
"title": "2022 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"acronym": "percom-workshops",
"groupId": "1000552",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1Df87KUPDqM",
"doi": "10.1109/PerComWorkshops53856.2022.9767514",
"title": "Demo: Distracted Driving Detection",
"normalizedTitle": "Demo: Distracted Driving Detection",
"abstract": "Rear-end collisions, which account for over 33% of crashes, are the most frequent type of accidents in the USA. According to reports on road safety, almost all rear-end collisions are due to the distracted driving behavior of other nearby drivers. Ego vehicles should detect such deviated driving behaviors on other cars and warn their drivers to avoid rear-end crashes. In this paper, we demonstrate such a distracted driving detection system through field trials with multiple test vehicles. The ego vehicle observes the distance to preceding and following vehicles and detects distracted driving behavior on follower vehicles. A warning is generated whenever the follower vehicle exhibits distracted driving behavior.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Rear-end collisions, which account for over 33% of crashes, are the most frequent type of accidents in the USA. According to reports on road safety, almost all rear-end collisions are due to the distracted driving behavior of other nearby drivers. Ego vehicles should detect such deviated driving behaviors on other cars and warn their drivers to avoid rear-end crashes. In this paper, we demonstrate such a distracted driving detection system through field trials with multiple test vehicles. The ego vehicle observes the distance to preceding and following vehicles and detects distracted driving behavior on follower vehicles. A warning is generated whenever the follower vehicle exhibits distracted driving behavior.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Rear-end collisions, which account for over 33% of crashes, are the most frequent type of accidents in the USA. According to reports on road safety, almost all rear-end collisions are due to the distracted driving behavior of other nearby drivers. Ego vehicles should detect such deviated driving behaviors on other cars and warn their drivers to avoid rear-end crashes. In this paper, we demonstrate such a distracted driving detection system through field trials with multiple test vehicles. The ego vehicle observes the distance to preceding and following vehicles and detects distracted driving behavior on follower vehicles. A warning is generated whenever the follower vehicle exhibits distracted driving behavior.",
"fno": "09767514",
"keywords": [
"Collision Avoidance",
"Driver Information Systems",
"Road Accidents",
"Road Safety",
"Road Vehicles",
"Traffic Engineering Computing",
"Rear End Collisions",
"Road Safety",
"Distracted Driving Behavior",
"Nearby Drivers",
"Ego Vehicle",
"Deviated Driving Behaviors",
"Rear End Crashes",
"Detection System",
"Multiple Test Vehicles",
"Follower Vehicles",
"Follower Vehicle Exhibits",
"Pervasive Computing",
"Conferences",
"Drives",
"Road Safety",
"Automobiles",
"Vehicles",
"Accidents",
"Rear End Collisions",
"Distracted Driving Detection",
"Field Trials"
],
"authors": [
{
"affiliation": "Toyota Motor North America R&D,InfoTech Labs,Mountain View,CA,USA",
"fullName": "Seyhan Ucar",
"givenName": "Seyhan",
"surname": "Ucar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Toyota Motor North America R&D,InfoTech Labs,Mountain View,CA,USA",
"fullName": "Haritha Muralidharan",
"givenName": "Haritha",
"surname": "Muralidharan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Toyota Motor North America R&D,InfoTech Labs,Mountain View,CA,USA",
"fullName": "E. Akin Sisbot",
"givenName": "E. Akin",
"surname": "Sisbot",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Toyota Motor North America R&D,InfoTech Labs,Mountain View,CA,USA",
"fullName": "Kentaro Oguchi",
"givenName": "Kentaro",
"surname": "Oguchi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "percom-workshops",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "70-72",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-1647-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09767473",
"articleId": "1Df7SojAjaU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09767360",
"articleId": "1Df7XamVnkQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2010/6821/0/05444642",
"title": "Design and evaluation of a vibrotactile seat to improve spatial awareness while driving",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444642/12OmNwoPtoK",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2018/3227/0/08480282",
"title": "Detecting Distracted Driving Using a Wrist-Worn Wearable",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2018/08480282/17D45VsBU4a",
"parentPublication": {
"id": "proceedings/percomw/2018/3227/0",
"title": "2018 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2021/02/08550705",
"title": "Biofeedback Arrests Sympathetic and Behavioral Effects in Distracted Driving",
"doi": null,
"abstractUrl": "/journal/ta/2021/02/08550705/17D45Wt3Ex9",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2022/5176/0/517600a494",
"title": "My Mobile Knows That I am Driving! In-Vehicle (Relative) Blind Localization of a Smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2022/517600a494/1G89DyRFvz2",
"parentPublication": {
"id": "proceedings/mdm/2022/5176/0",
"title": "2022 23rd IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sec/2022/8611/0/861100a277",
"title": "Poster: Distracted Driving Management",
"doi": null,
"abstractUrl": "/proceedings-article/sec/2022/861100a277/1JC1eall7mE",
"parentPublication": {
"id": "proceedings/sec/2022/8611/0",
"title": "2022 IEEE/ACM 7th Symposium on Edge Computing (SEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sec/2022/8611/0/861100a279",
"title": "Demo: Nearby Aggressive Driving Detection",
"doi": null,
"abstractUrl": "/proceedings-article/sec/2022/861100a279/1JC1fhV3b56",
"parentPublication": {
"id": "proceedings/sec/2022/8611/0",
"title": "2022 IEEE/ACM 7th Symposium on Edge Computing (SEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2022/7180/0/718000a485",
"title": "Distracted Driving Detection Utilizing Wearable-based Bluetooth",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2022/718000a485/1JeE5VDgxig",
"parentPublication": {
"id": "proceedings/mass/2022/7180/0",
"title": "2022 IEEE 19th International Conference on Mobile Ad Hoc and Smart Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinc/2022/0969/0/096900a044",
"title": "Detection of Distracted Driving Behavior Based on Object Information",
"doi": null,
"abstractUrl": "/proceedings-article/icinc/2022/096900a044/1M674koqxQQ",
"parentPublication": {
"id": "proceedings/icinc/2022/0969/0",
"title": "2022 International Conference on Informatics, Networking and Computing (ICINC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2019/03/08817317",
"title": "Multimedia for Autonomous Driving",
"doi": null,
"abstractUrl": "/magazine/mu/2019/03/08817317/1cPWP7sFJsI",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0/148500b150",
"title": "Distracted Driving Behavior Detection and Identification Based on Improved Cornernet-Saccade",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2020/148500b150/1ua4ED8VaZW",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0",
"title": "2020 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cJ7haJo31C",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"acronym": "icse-companion",
"groupId": "1002125",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ7n8xSxMY",
"doi": "10.1109/ICSE-Companion.2019.00031",
"title": "AC3R: Automatically Reconstructing Car Crashes from Police Reports",
"normalizedTitle": "AC3R: Automatically Reconstructing Car Crashes from Police Reports",
"abstract": "Autonomous driving carries the promise to drastically reduce car accidents, but recently reported fatal crashes involving self-driving cars suggest that the self-driving car software should be tested more thoroughly. For addressing this need, we introduce AC3R (Automatic Crash Constructor from Crash Report) which elaborates police reports to automatically recreate car crashes in a simulated environment that can be used for testing self-driving car software in critical situations. AC3R enables developers to quickly generate relevant test cases from the massive historical dataset of recorded car crashes. We demonstrate how AC3R can generate simulations of different car crashes and report the findings of a large user study which concluded that AC3R simulations are accurate. A video illustrating AC3R in action is available at: https://youtu.be/V708fDG_ux8.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Autonomous driving carries the promise to drastically reduce car accidents, but recently reported fatal crashes involving self-driving cars suggest that the self-driving car software should be tested more thoroughly. For addressing this need, we introduce AC3R (Automatic Crash Constructor from Crash Report) which elaborates police reports to automatically recreate car crashes in a simulated environment that can be used for testing self-driving car software in critical situations. AC3R enables developers to quickly generate relevant test cases from the massive historical dataset of recorded car crashes. We demonstrate how AC3R can generate simulations of different car crashes and report the findings of a large user study which concluded that AC3R simulations are accurate. A video illustrating AC3R in action is available at: https://youtu.be/V708fDG_ux8.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Autonomous driving carries the promise to drastically reduce car accidents, but recently reported fatal crashes involving self-driving cars suggest that the self-driving car software should be tested more thoroughly. For addressing this need, we introduce AC3R (Automatic Crash Constructor from Crash Report) which elaborates police reports to automatically recreate car crashes in a simulated environment that can be used for testing self-driving car software in critical situations. AC3R enables developers to quickly generate relevant test cases from the massive historical dataset of recorded car crashes. We demonstrate how AC3R can generate simulations of different car crashes and report the findings of a large user study which concluded that AC3R simulations are accurate. A video illustrating AC3R in action is available at: https://youtu.be/V708fDG_ux8.",
"fno": "176400a031",
"keywords": [
"Automobiles",
"Road Accidents",
"Road Safety",
"Traffic Engineering Computing",
"Video Signal Processing",
"Police Reports",
"Autonomous Driving",
"Car Accidents",
"Fatal Crashes",
"Self Driving Cars",
"Testing Self Driving Car Software",
"Recorded Car Crashes",
"AC 3 R",
"Automatic Crash Constructor From Crash Report",
"Accidents",
"Automobiles",
"Law Enforcement",
"Autonomous Automobiles",
"Ontologies",
"Roads",
"Vehicle Crash Testing",
"Test Case Generation",
"Self Driving Cars",
"Natural Language Processing"
],
"authors": [
{
"affiliation": "Saarland University/CISPA, Germany",
"fullName": "Tri Huynh",
"givenName": "Tri",
"surname": "Huynh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Passau, Germany",
"fullName": "Alessio Gambi",
"givenName": "Alessio",
"surname": "Gambi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Passau, Germany",
"fullName": "Gordon Fraser",
"givenName": "Gordon",
"surname": "Fraser",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icse-companion",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-05-01T00:00:00",
"pubType": "proceedings",
"pages": "31-34",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1764-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "176400a027",
"articleId": "1cJ7kfxwf4s",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "176400a035",
"articleId": "1cJ7o3lm9tm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icicta/2009/3804/2/3804b928",
"title": "Study on Lightweight Optimization of Car Body Based on Crash",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2009/3804b928/12OmNAYXWEF",
"parentPublication": {
"id": "proceedings/icicta/2009/3804/3",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2016/2312/0/2312a889",
"title": "Vehicle-to-Vehicle Rear Crashes in China - A Study of Accident Characteristics to Provide Input to Active Safety System Design",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a889/12OmNyRxFDE",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2011/03/tts2011030430",
"title": "Which Crashes Should I Fix First?: Predicting Top Crashes at an Early Stage to Prioritize Debugging Efforts",
"doi": null,
"abstractUrl": "/journal/ts/2011/03/tts2011030430/13rRUwbs2hZ",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icons/2007/2807/0/04196350",
"title": "Intelligent 3D Car-Body Deformation Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icons/2007/04196350/183rAcPNPiP",
"parentPublication": {
"id": "proceedings/icons/2007/2807/0",
"title": "Second International Conference on Systems (ICONS'07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aitest/2022/8737/0/873700a095",
"title": "Generating Critical Driving Scenarios from Accident Sketches",
"doi": null,
"abstractUrl": "/proceedings-article/aitest/2022/873700a095/1GZwsoaKmLm",
"parentPublication": {
"id": "proceedings/aitest/2022/8737/0",
"title": "2022 IEEE International Conference On Artificial Intelligence Testing (AITest)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2019/8350/0/08718113",
"title": "Self-Driving Car Meets Multi-Access Edge Computing for Deep Learning-Based Caching",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2019/08718113/1aIS1SK4z4Y",
"parentPublication": {
"id": "proceedings/icoin/2019/8350/0",
"title": "2019 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798084",
"title": "Passenger Anxiety when Seated in a Virtual Reality Self-Driving Car",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798084/1cJ0In7PNdu",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2019/1764/0/176400a027",
"title": "AsFault: Testing Self-Driving Car Software Using Search-Based Procedural Content Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2019/176400a027/1cJ7kfxwf4s",
"parentPublication": {
"id": "proceedings/icse-companion/2019/1764/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2019/1764/0/176400a290",
"title": "Automatically Reconstructing Car Crashes from Police Reports for Testing Self-Driving Cars",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2019/176400a290/1cJ7nMlYJLW",
"parentPublication": {
"id": "proceedings/icse-companion/2019/1764/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/clei/2018/0437/0/043700a632",
"title": "Fast car Crash Detection in Video",
"doi": null,
"abstractUrl": "/proceedings-article/clei/2018/043700a632/1cdP0jfVmGQ",
"parentPublication": {
"id": "proceedings/clei/2018/0437/0",
"title": "2018 XLIV Latin American Computer Conference (CLEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cJ7haJo31C",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"acronym": "icse-companion",
"groupId": "1002125",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ7nMlYJLW",
"doi": "10.1109/ICSE-Companion.2019.00119",
"title": "Automatically Reconstructing Car Crashes from Police Reports for Testing Self-Driving Cars",
"normalizedTitle": "Automatically Reconstructing Car Crashes from Police Reports for Testing Self-Driving Cars",
"abstract": "Autonomous driving carries the promise to drastically reduce the number of car accidents; however, recently reported fatal crashes involving self-driving cars show this important goal is not yet achieved, and call for better testing of the software controlling self-driving cars. To better test self-driving car software, we propose to specifically test critical scenarios. Since these are difficult to test in field operation, we create simulations of critical situations. These simulations are automatically derived from natural language police reports of actual car crashes, which are available in historical datasets. Our initial evaluation shows that we can generate accurate simulations in a matter of minutes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Autonomous driving carries the promise to drastically reduce the number of car accidents; however, recently reported fatal crashes involving self-driving cars show this important goal is not yet achieved, and call for better testing of the software controlling self-driving cars. To better test self-driving car software, we propose to specifically test critical scenarios. Since these are difficult to test in field operation, we create simulations of critical situations. These simulations are automatically derived from natural language police reports of actual car crashes, which are available in historical datasets. Our initial evaluation shows that we can generate accurate simulations in a matter of minutes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Autonomous driving carries the promise to drastically reduce the number of car accidents; however, recently reported fatal crashes involving self-driving cars show this important goal is not yet achieved, and call for better testing of the software controlling self-driving cars. To better test self-driving car software, we propose to specifically test critical scenarios. Since these are difficult to test in field operation, we create simulations of critical situations. These simulations are automatically derived from natural language police reports of actual car crashes, which are available in historical datasets. Our initial evaluation shows that we can generate accurate simulations in a matter of minutes.",
"fno": "176400a290",
"keywords": [
"Automobiles",
"Mobile Robots",
"Natural Language Processing",
"Police Data Processing",
"Road Accidents",
"Road Safety",
"Text Analysis",
"Vehicle Dynamics",
"Car Accidents",
"Software Controlling Self Driving Cars",
"Natural Language Police Reports",
"Automatically Reconstructing Car Crashes",
"Autonomous Driving Carries",
"Text Analysis",
"Automobiles",
"Accidents",
"Law Enforcement",
"Autonomous Automobiles",
"Vehicle Crash Testing",
"Software",
"Testing",
"Test Case Generation",
"Self Driving Cars",
"Natural Language Processing",
"Procedural Content Generation"
],
"authors": [
{
"affiliation": "University of Passau, Germany",
"fullName": "Alessio Gambi",
"givenName": "Alessio",
"surname": "Gambi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Saarland University/CISPA, Germany",
"fullName": "Tri Huynh",
"givenName": "Tri",
"surname": "Huynh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Passau, Germany",
"fullName": "Gordon Fraser",
"givenName": "Gordon",
"surname": "Fraser",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icse-companion",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-05-01T00:00:00",
"pubType": "proceedings",
"pages": "290-291",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1764-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "176400a288",
"articleId": "1cJ7kx0Z8yI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "176400a294",
"articleId": "1cJ7jtfIWc0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2016/2312/0/2312a889",
"title": "Vehicle-to-Vehicle Rear Crashes in China - A Study of Accident Characteristics to Provide Input to Active Safety System Design",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a889/12OmNyRxFDE",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2018/5638/0/563801a303",
"title": "DeepTest: Automated Testing of Deep-Neural-Network-Driven Autonomous Cars",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2018/563801a303/13l5NWR8tNA",
"parentPublication": {
"id": "proceedings/icse/2018/5638/0",
"title": "2018 IEEE/ACM 40th International Conference on Software Engineering (ICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523388",
"title": "A Real-Time Augmented Reality System to See-Through Cars",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523388/13rRUxBJhvy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2022/1647/0/09767514",
"title": "Demo: Distracted Driving Detection",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2022/09767514/1Df87KUPDqM",
"parentPublication": {
"id": "proceedings/percom-workshops/2022/1647/0",
"title": "2022 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aitest/2022/8737/0/873700a095",
"title": "Generating Critical Driving Scenarios from Accident Sketches",
"doi": null,
"abstractUrl": "/proceedings-article/aitest/2022/873700a095/1GZwsoaKmLm",
"parentPublication": {
"id": "proceedings/aitest/2022/8737/0",
"title": "2022 IEEE International Conference On Artificial Intelligence Testing (AITest)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sec/2022/8611/0/861100a279",
"title": "Demo: Nearby Aggressive Driving Detection",
"doi": null,
"abstractUrl": "/proceedings-article/sec/2022/861100a279/1JC1fhV3b56",
"parentPublication": {
"id": "proceedings/sec/2022/8611/0",
"title": "2022 IEEE/ACM 7th Symposium on Edge Computing (SEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2019/1764/0/176400a027",
"title": "AsFault: Testing Self-Driving Car Software Using Search-Based Procedural Content Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2019/176400a027/1cJ7kfxwf4s",
"parentPublication": {
"id": "proceedings/icse-companion/2019/1764/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2019/1764/0/176400a031",
"title": "AC3R: Automatically Reconstructing Car Crashes from Police Reports",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2019/176400a031/1cJ7n8xSxMY",
"parentPublication": {
"id": "proceedings/icse-companion/2019/1764/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2020/7122/0/712200a328",
"title": "Real-world Ethics for Self-Driving Cars",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2020/712200a328/1pcSMstuKZy",
"parentPublication": {
"id": "proceedings/icse-companion/2020/7122/0",
"title": "2020 IEEE/ACM 42nd International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/conisoft/2020/8450/0/845000a174",
"title": "Blockchain Software System Proposal Applied to Electric Self-driving Cars Charging Stations: A TSP Academic Project",
"doi": null,
"abstractUrl": "/proceedings-article/conisoft/2020/845000a174/1q0FQeeGFNK",
"parentPublication": {
"id": "proceedings/conisoft/2020/8450/0",
"title": "2020 8th International Conference in Software Engineering Research and Innovation (CONISOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1j9xA6zpSFi",
"title": "2019 15th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"acronym": "sitis",
"groupId": "1002425",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1j9xEaC4Zr2",
"doi": "10.1109/SITIS.2019.00091",
"title": "Recognizing the Illegal Parking Patterns of Cars on the Road in Front of the Bus Stop Using the Support Vector Machine",
"normalizedTitle": "Recognizing the Illegal Parking Patterns of Cars on the Road in Front of the Bus Stop Using the Support Vector Machine",
"abstract": "Traffic jams are a major problem in the lives of people in the capital. The occurrence of such problems is due to drivers who do not respect traffic laws, inappropriate behavior of the driver, and the cause of illegal parking at prohibited parking, resulting in the rear car unable to move further. Possible or the car must change lanes to another lane. Therefore, to reduce such problems, we have developed an illegal parking pattern recognition system for cars on the road. We have applied the Support Vector Machine (SVM) to the signal from CCTV cameras and find important characteristics of illegal car parked at the bus stop. Then use those images to learn to recognize the parking behavior of cars with Linear Regression. From the experiment, it is found that the recognition of cars parked on the road in front of the bus stop has an accuracy rate of 82.22 percent which can be used to detect the soaking of personal cars in real life.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Traffic jams are a major problem in the lives of people in the capital. The occurrence of such problems is due to drivers who do not respect traffic laws, inappropriate behavior of the driver, and the cause of illegal parking at prohibited parking, resulting in the rear car unable to move further. Possible or the car must change lanes to another lane. Therefore, to reduce such problems, we have developed an illegal parking pattern recognition system for cars on the road. We have applied the Support Vector Machine (SVM) to the signal from CCTV cameras and find important characteristics of illegal car parked at the bus stop. Then use those images to learn to recognize the parking behavior of cars with Linear Regression. From the experiment, it is found that the recognition of cars parked on the road in front of the bus stop has an accuracy rate of 82.22 percent which can be used to detect the soaking of personal cars in real life.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Traffic jams are a major problem in the lives of people in the capital. The occurrence of such problems is due to drivers who do not respect traffic laws, inappropriate behavior of the driver, and the cause of illegal parking at prohibited parking, resulting in the rear car unable to move further. Possible or the car must change lanes to another lane. Therefore, to reduce such problems, we have developed an illegal parking pattern recognition system for cars on the road. We have applied the Support Vector Machine (SVM) to the signal from CCTV cameras and find important characteristics of illegal car parked at the bus stop. Then use those images to learn to recognize the parking behavior of cars with Linear Regression. From the experiment, it is found that the recognition of cars parked on the road in front of the bus stop has an accuracy rate of 82.22 percent which can be used to detect the soaking of personal cars in real life.",
"fno": "568600a538",
"keywords": [
"Automobiles",
"Closed Circuit Television",
"Learning Artificial Intelligence",
"Object Recognition",
"Regression Analysis",
"Road Traffic",
"Support Vector Machines",
"Traffic Engineering Computing",
"Bus Stop",
"Support Vector Machine",
"Traffic Jams",
"Prohibited Parking",
"Illegal Parking Pattern Recognition System",
"CCTV Cameras",
"Car Recognition",
"Linear Regression",
"Automobiles",
"Roads",
"Feature Extraction",
"Support Vector Machines",
"Cameras",
"Histograms",
"Accidents",
"Personal Vehicles Support Vector Machine SVM Linear Regression CCTV"
],
"authors": [
{
"affiliation": "Faculty of Information Technology, ITMRC, King Mongkuts University of Technology, Thailand",
"fullName": "Mahasak Ketcham",
"givenName": "Mahasak",
"surname": "Ketcham",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Information Technology, ITMRC, King Mongkut’s University of Technology North Bangkok, Bangkok, Thailand",
"fullName": "Eakbodin Getkhaw",
"givenName": "Eakbodin",
"surname": "Getkhaw",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Information Technology, ITMRC, King Mongkut’s University of Technology North Bangkok, Bangkok, Thailand",
"fullName": "Manussawee Piyaneeranart",
"givenName": "Manussawee",
"surname": "Piyaneeranart",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Electrical Engineering, Chulalongkorn University, Thailand",
"fullName": "Thittaporn Ganokratanaa",
"givenName": "Thittaporn",
"surname": "Ganokratanaa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Business Information Management, Phechaburi Rajabhat University, Phetchaburi, Thailand",
"fullName": "Worawut Yimyam",
"givenName": "Worawut",
"surname": "Yimyam",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sitis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-11-01T00:00:00",
"pubType": "proceedings",
"pages": "538-542",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-5686-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "568600a531",
"articleId": "1j9xAH2S5Xi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "568600a543",
"articleId": "1j9xEBluNVK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/itng/2011/4367/0/4367a135",
"title": "Authoritative Intelligent Perfect Parallel Parking Based on Fuzzy Logic Controller for Car-Type Mobile Robot",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2011/4367a135/12OmNwp74Bc",
"parentPublication": {
"id": "proceedings/itng/2011/4367/0",
"title": "Information Technology: New Generations, Third International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itnac/2017/6796/0/08215400",
"title": "Automated parking lot management system using embedded robot type smart car based on wireless sensors",
"doi": null,
"abstractUrl": "/proceedings-article/itnac/2017/08215400/12OmNwwMf0E",
"parentPublication": {
"id": "proceedings/itnac/2017/6796/0",
"title": "2017 27th International Telecommunication Networks and Applications Conference (ITNAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isms/2013/4963/0/4963a024",
"title": "Intelligent Parking System for Car Parking Guidance and Damage Notification",
"doi": null,
"abstractUrl": "/proceedings-article/isms/2013/4963a024/12OmNx1qUZo",
"parentPublication": {
"id": "proceedings/isms/2013/4963/0",
"title": "Intelligent Systems, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wowmom/2016/2185/0/07523570",
"title": "Statistics of parked cars for urban vehicular networks",
"doi": null,
"abstractUrl": "/proceedings-article/wowmom/2016/07523570/12OmNyQphlg",
"parentPublication": {
"id": "proceedings/wowmom/2016/2185/0",
"title": "2016 IEEE 17th International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2017/3581/0/3581a840",
"title": "Illegal Parking Detection Using Gaussian Mixture Model and Kalman Filter",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2017/3581a840/12OmNz2TCDM",
"parentPublication": {
"id": "proceedings/aiccsa/2017/3581/0",
"title": "2017 IEEE/ACS 14th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fit/2018/9355/0/935500a065",
"title": "Optimized Pricing & Scheduling Model for Long Range Autonomous Valet Parking",
"doi": null,
"abstractUrl": "/proceedings-article/fit/2018/935500a065/17D45X7VTfb",
"parentPublication": {
"id": "proceedings/fit/2018/9355/0",
"title": "2018 International Conference on Frontiers of Information Technology (FIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/arace/2022/5153/0/515300a041",
"title": "Design of Intelligent Parking Lock for Road Parking Based on NB-IoT",
"doi": null,
"abstractUrl": "/proceedings-article/arace/2022/515300a041/1Ip7FJCDh1S",
"parentPublication": {
"id": "proceedings/arace/2022/5153/0",
"title": "2022 Asia Conference on Advanced Robotics, Automation, and Control Engineering (ARACE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2019/1764/0/176400a290",
"title": "Automatically Reconstructing Car Crashes from Police Reports for Testing Self-Driving Cars",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2019/176400a290/1cJ7nMlYJLW",
"parentPublication": {
"id": "proceedings/icse-companion/2019/1764/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aect/2020/4452/0/09194195",
"title": "IoT for Smart Parking",
"doi": null,
"abstractUrl": "/proceedings-article/aect/2020/09194195/1n0IkL8J4be",
"parentPublication": {
"id": "proceedings/aect/2020/4452/0",
"title": "2019 International Conference on Advances in the Emerging Computing Technologies (AECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2021/0424/0/09431076",
"title": "A System for Real-time On-street Parking Detection and Visualization on an Edge Device",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2021/09431076/1tROTV4SeQ0",
"parentPublication": {
"id": "proceedings/percom-workshops/2021/0424/0",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1y63hD9CG0o",
"title": "2021 IEEE European Symposium on Security and Privacy Workshops (EuroS&PW)",
"acronym": "euros&pw",
"groupId": "1820965",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1y63mHqHfR6",
"doi": "10.1109/EuroSPW54576.2021.00022",
"title": "Privacy and modern cars through a dual lens",
"normalizedTitle": "Privacy and modern cars through a dual lens",
"abstract": "Modern cars technologies are evolving quickly. They collect a variety of personal data and treat it on behalf of the car manufacturer to improve the drivers’ experience. The precise terms of such a treatment are stated within the privacy policies accepted by the user when buying a car or through the infotainment system when it is first started. This paper uses a double lens to assess people’s privacy while they drive a car. The first approach is objective and studies the readability of privacy policies that comes with cars. We analyse the privacy policies of twelve car brands and apply well-known readability indices to evaluate the extent to which privacy policies are comprehensible by all drivers. The second approach targets drivers’ opinions to extrapolate their privacy concerns and trust perceptions. We design a questionnaire to collect the opinions of 88 participants and draw essential statistics about them. Our combined findings indicate that privacy is insufficiently understood at present as an issue deriving from driving a car, hence future technologies should be tailored to make people more aware of the issue and to enable them to express their preferences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Modern cars technologies are evolving quickly. They collect a variety of personal data and treat it on behalf of the car manufacturer to improve the drivers’ experience. The precise terms of such a treatment are stated within the privacy policies accepted by the user when buying a car or through the infotainment system when it is first started. This paper uses a double lens to assess people’s privacy while they drive a car. The first approach is objective and studies the readability of privacy policies that comes with cars. We analyse the privacy policies of twelve car brands and apply well-known readability indices to evaluate the extent to which privacy policies are comprehensible by all drivers. The second approach targets drivers’ opinions to extrapolate their privacy concerns and trust perceptions. We design a questionnaire to collect the opinions of 88 participants and draw essential statistics about them. Our combined findings indicate that privacy is insufficiently understood at present as an issue deriving from driving a car, hence future technologies should be tailored to make people more aware of the issue and to enable them to express their preferences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Modern cars technologies are evolving quickly. They collect a variety of personal data and treat it on behalf of the car manufacturer to improve the drivers’ experience. The precise terms of such a treatment are stated within the privacy policies accepted by the user when buying a car or through the infotainment system when it is first started. This paper uses a double lens to assess people’s privacy while they drive a car. The first approach is objective and studies the readability of privacy policies that comes with cars. We analyse the privacy policies of twelve car brands and apply well-known readability indices to evaluate the extent to which privacy policies are comprehensible by all drivers. The second approach targets drivers’ opinions to extrapolate their privacy concerns and trust perceptions. We design a questionnaire to collect the opinions of 88 participants and draw essential statistics about them. Our combined findings indicate that privacy is insufficiently understood at present as an issue deriving from driving a car, hence future technologies should be tailored to make people more aware of the issue and to enable them to express their preferences.",
"fno": "999900a136",
"keywords": [
"Automobiles",
"Data Privacy",
"Traffic Engineering Computing",
"Trusted Computing",
"Car Manufacturer",
"Privacy Policies",
"Modern Car Technology",
"Driver Experience",
"Readability Indices",
"Driver Trust Perceptions",
"Personal Data Collection",
"Privacy",
"Automobiles",
"Lenses",
"Vehicles",
"Automotive",
"Privacy",
"Drivers",
"Cybersecurity"
],
"authors": [
{
"affiliation": "Università degli Studi di Catania,Dipartimento di Matematica e Informatica,Catania,Italy",
"fullName": "Giampaolo Bella",
"givenName": "Giampaolo",
"surname": "Bella",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Università degli Studi di Catania,Dipartimento di Matematica e Informatica,Catania,Italy",
"fullName": "Pietro Biondi",
"givenName": "Pietro",
"surname": "Biondi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Istituto di Informatica e Telematica Consiglio Nazionale delle Ricerche,Pisa,Italy",
"fullName": "Marco De Vincenzi",
"givenName": "Marco De",
"surname": "Vincenzi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Università degli Studi di Catania,Dipartimento di Matematica e Informatica,Catania,Italy",
"fullName": "Giuseppe Tudisco",
"givenName": "Giuseppe",
"surname": "Tudisco",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "euros&pw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-09-01T00:00:00",
"pubType": "proceedings",
"pages": "136-143",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1012-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "999900a129",
"articleId": "1y63jTQ8q0U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "999900a144",
"articleId": "1y63l6ABcIg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/trustcom-bigdatase-i-spa/2015/7952/1/07345387",
"title": "The Role of Risk Perceptions in Privacy Concerns Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom-bigdatase-i-spa/2015/07345387/12OmNAtaS1l",
"parentPublication": {
"id": "proceedings/trustcom-bigdatase-i-spa/2015/7952/2",
"title": "2015 IEEE Trustcom/BigDataSE/ISPA",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rvsp/2015/9647/0/9647a109",
"title": "Managing Private Cars Usage from the Perspective of Owners",
"doi": null,
"abstractUrl": "/proceedings-article/rvsp/2015/9647a109/12OmNBQTJgJ",
"parentPublication": {
"id": "proceedings/rvsp/2015/9647/0",
"title": "2015 Third International Conference on Robot, Vision and Signal Processing (RVSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/policy/2012/4735/0/4735a094",
"title": "Content Analysis of Privacy Policies for Health Social Networks",
"doi": null,
"abstractUrl": "/proceedings-article/policy/2012/4735a094/12OmNz5JC1o",
"parentPublication": {
"id": "proceedings/policy/2012/4735/0",
"title": "Policies for Distributed Systems and Networks, IEEE International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2013/5022/0/5022b675",
"title": "ProTACD: A Generic Privacy Process for Vehicle Development",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2013/5022b675/12OmNzSQdhi",
"parentPublication": {
"id": "proceedings/trustcom/2013/5022/0",
"title": "2013 12th IEEE International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbd/2018/8034/0/803400a257",
"title": "Privacy Preservation of Location Information Based on MinHash Algorithm in Online Ride-Hailing Services",
"doi": null,
"abstractUrl": "/proceedings-article/cbd/2018/803400a257/17D45Wda7f2",
"parentPublication": {
"id": "proceedings/cbd/2018/8034/0",
"title": "2018 Sixth International Conference on Advanced Cloud and Big Data (CBD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2018/06/08617744",
"title": "Autonomous Cars: Social and Economic Implications",
"doi": null,
"abstractUrl": "/magazine/it/2018/06/08617744/17D45X2fUEX",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom-bigdatase/2018/4388/0/438801b091",
"title": "Track me if you can? Query Based Dual Location Privacy in VANETs for V2V and V2I",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom-bigdatase/2018/438801b091/17D45XwUAMd",
"parentPublication": {
"id": "proceedings/trustcom-bigdatase/2018/4388/0",
"title": "2018 17th IEEE International Conference On Trust, Security And Privacy In Computing And Communications/ 12th IEEE International Conference On Big Data Science And Engineering (TrustCom/BigDataSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nca/2021/9550/0/09685942",
"title": "Accountable and privacy-aware flexible car sharing and rental services",
"doi": null,
"abstractUrl": "/proceedings-article/nca/2021/09685942/1AC8QYRODcY",
"parentPublication": {
"id": "proceedings/nca/2021/9550/0",
"title": "2021 IEEE 20th International Symposium on Network Computing and Applications (NCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sp/2020/3497/0/349700b184",
"title": "I Know Where You Parked Last Summer : Automated Reverse Engineering and Privacy Analysis of Modern Cars",
"doi": null,
"abstractUrl": "/proceedings-article/sp/2020/349700b184/1j2LgepkJj2",
"parentPublication": {
"id": "proceedings/sp/2020/3497/0/",
"title": "2020 IEEE Symposium on Security and Privacy (SP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2022/02/09170827",
"title": "Privacy-Preserving Navigation Supporting Similar Queries in Vehicular Networks",
"doi": null,
"abstractUrl": "/journal/tq/2022/02/09170827/1moto8OWKuQ",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxE2mWp",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx0RIXh",
"doi": "10.1109/CVPR.2010.5540162",
"title": "Hybrid shift map for video retargeting",
"normalizedTitle": "Hybrid shift map for video retargeting",
"abstract": "We propose a new method for video retargeting, which can generate spatial-temporal consistent video. The new measure called spatial-temporal naturality preserves the motion in the source video without any motion analysis in contrast to other methods that need motion estimation. This advantage prevents the retargeted video from degenerating due to the propagation of the errors in motion analysis. It allows the proposed method to be applied on challenging videos with complex camera and object motion. To improve the efficiency of the retargeting process, we retarget video using a 3D shift map in low resolution and refine it using an incremental 2D shift map in higher resolution. This new hierarchical framework, denoted as hybrid shift map, can produce satisfactory retargeting results while significantly improving the computational efficiency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a new method for video retargeting, which can generate spatial-temporal consistent video. The new measure called spatial-temporal naturality preserves the motion in the source video without any motion analysis in contrast to other methods that need motion estimation. This advantage prevents the retargeted video from degenerating due to the propagation of the errors in motion analysis. It allows the proposed method to be applied on challenging videos with complex camera and object motion. To improve the efficiency of the retargeting process, we retarget video using a 3D shift map in low resolution and refine it using an incremental 2D shift map in higher resolution. This new hierarchical framework, denoted as hybrid shift map, can produce satisfactory retargeting results while significantly improving the computational efficiency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a new method for video retargeting, which can generate spatial-temporal consistent video. The new measure called spatial-temporal naturality preserves the motion in the source video without any motion analysis in contrast to other methods that need motion estimation. This advantage prevents the retargeted video from degenerating due to the propagation of the errors in motion analysis. It allows the proposed method to be applied on challenging videos with complex camera and object motion. To improve the efficiency of the retargeting process, we retarget video using a 3D shift map in low resolution and refine it using an incremental 2D shift map in higher resolution. This new hierarchical framework, denoted as hybrid shift map, can produce satisfactory retargeting results while significantly improving the computational efficiency.",
"fno": "05540162",
"keywords": [],
"authors": [
{
"affiliation": "School of Computer Engineering, Nanyang Technological University, Singapore 639798",
"fullName": "Yiqun Hu",
"givenName": "Yiqun",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Engineering, Nanyang Technological University, Singapore 639798",
"fullName": "Deepu Rajan",
"givenName": "Deepu",
"surname": "Rajan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-06-01T00:00:00",
"pubType": "proceedings",
"pages": "577-584",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-6984-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05540165",
"articleId": "12OmNzwZ6vw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05540163",
"articleId": "12OmNBd9T45",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032e568",
"title": "Weakly- and Self-Supervised Learning for Content-Aware Deep Image Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032e568/12OmNANkomn",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995445",
"title": "Importance filtering for image retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995445/12OmNAlNiVo",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459159",
"title": "Shift-map image editing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459159/12OmNroij4z",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607511",
"title": "Visual preserving video retargeting with deformable shape consistency",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607511/12OmNxVV5V9",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012025",
"title": "Coarse-to-fine temporal optimization for video retargeting based on seam carving",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012025/12OmNyqRnmI",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2007/1630/0/04409010",
"title": "Non-homogeneous Content-driven Video-retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2007/04409010/12OmNyz5JZx",
"parentPublication": {
"id": "proceedings/iccv/2007/1630/0",
"title": "2007 11th IEEE International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101677",
"title": "Content-Aware Video Retargeting Using Object-Preserving Warping",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101677/13rRUxASuMC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000g517",
"title": "Depth-Aware Stereo Video Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000g517/17D45Xtvpe1",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b655",
"title": "Warping-Based Stereoscopic 3D Video Retargeting With Depth Remapping",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b655/18j8LvV2AJG",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a412",
"title": "Cross-identity Video Motion Retargeting with Joint Transformation and Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a412/1La4J01TyRa",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxE2mTG",
"title": "2007 International Conference on Multimedia & Expo",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzkMlMm",
"doi": "10.1109/ICME.2007.4284819",
"title": "Real-Time Humanoid Avatar for Multimodal Human-Machine Interaction",
"normalizedTitle": "Real-Time Humanoid Avatar for Multimodal Human-Machine Interaction",
"abstract": "A novel framework of multimodal human-machine or human-human interaction via real-time humanoid avatar communication is proposed for real-world mobile application. It integrates audio-visual analysis and synthesis modules to realize real-time head tracking, multichannel and runtime animations, visual TTS and real-time viseme detection and rendering. The 3D avatar provides customized modeling for low-bit rate virtual communication by adopting M3G standard and supports MPEG-4 FAPs. A robust user head tracker and the associated head pose and motion estimation scheme are developed for real-time avatar animation control at remote locations. The framework is recognized as an effective design for realistic industrial products of human-to-human mobile communication.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A novel framework of multimodal human-machine or human-human interaction via real-time humanoid avatar communication is proposed for real-world mobile application. It integrates audio-visual analysis and synthesis modules to realize real-time head tracking, multichannel and runtime animations, visual TTS and real-time viseme detection and rendering. The 3D avatar provides customized modeling for low-bit rate virtual communication by adopting M3G standard and supports MPEG-4 FAPs. A robust user head tracker and the associated head pose and motion estimation scheme are developed for real-time avatar animation control at remote locations. The framework is recognized as an effective design for realistic industrial products of human-to-human mobile communication.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A novel framework of multimodal human-machine or human-human interaction via real-time humanoid avatar communication is proposed for real-world mobile application. It integrates audio-visual analysis and synthesis modules to realize real-time head tracking, multichannel and runtime animations, visual TTS and real-time viseme detection and rendering. The 3D avatar provides customized modeling for low-bit rate virtual communication by adopting M3G standard and supports MPEG-4 FAPs. A robust user head tracker and the associated head pose and motion estimation scheme are developed for real-time avatar animation control at remote locations. The framework is recognized as an effective design for realistic industrial products of human-to-human mobile communication.",
"fno": "04284819",
"keywords": [
"Avatars",
"Computer Animation",
"Mobile Computing",
"Motion Estimation",
"User Interfaces",
"Multimodal Human Machine Interaction",
"Real Time Humanoid Avatar Communication",
"Audio Visual Analysis",
"Real Time Head Tracking",
"MPEG 4",
"Robust User Head Tracker",
"Motion Estimation",
"Human To Human Mobile Communication",
"Avatars",
"Man Machine Systems",
"Mobile Communication",
"Animation",
"Runtime",
"Speech Synthesis",
"Communication Standards",
"MPEG 4 Standard",
"Financial Advantage Program",
"Robust Control"
],
"authors": [
{
"affiliation": "Beckman Institute, University of Illinois at Urbana-Champaign (UIUC), Urbana, IL 61801, USA",
"fullName": "Yun Fu",
"givenName": "Yun",
"surname": "Fu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Multimedia Research Lab (MRL), Motorola Labs, Schaumburg, IL 60196, USA",
"fullName": "Renxiang Li",
"givenName": "Renxiang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beckman Institute, University of Illinois at Urbana-Champaign (UIUC), Urbana, IL 61801, USA",
"fullName": "Thomas S. Huang",
"givenName": "Thomas S.",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Multimedia Research Lab (MRL), Motorola Labs, Schaumburg, IL 60196, USA",
"fullName": "Mike Danielsen",
"givenName": "Mike",
"surname": "Danielsen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-07-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2007",
"issn": "1945-7871",
"isbn": "1-4244-1016-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04284818",
"articleId": "12OmNzfXau5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04284820",
"articleId": "12OmNs0kyyW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223369",
"title": "Human-avatar interaction and recognition memory according to interaction types and methods",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223369/12OmNvEQsfz",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607657",
"title": "Real-time conversion from a single 2D face image to a 3D text-driven emotive audio-visual avatar",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607657/12OmNx3HI5p",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/1/01394141",
"title": "MPEG-4 compliant reproduction of face animation created in Maya",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394141/12OmNy7h3bg",
"parentPublication": {
"id": "proceedings/icme/2004/8603/1",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2016/1269/0/07759995",
"title": "Humanoid avatar mentor: Integrating VLE and traditional classroom environment for distance learning",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2016/07759995/12OmNyRg4fm",
"parentPublication": {
"id": "proceedings/iciev/2016/1269/0",
"title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fbit/2007/2999/0/29990628",
"title": "Usability Evaluation of Humanoid-Animation Avatar with Physiological Signals",
"doi": null,
"abstractUrl": "/proceedings-article/fbit/2007/29990628/12OmNzVoBV7",
"parentPublication": {
"id": "proceedings/fbit/2007/2999/0",
"title": "2007 Frontiers in the Convergence of Bioscience and Information Technologies (FBIT '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/3/01394614",
"title": "Real-time human proxy: an avatar-based interaction system",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394614/12OmNzd7bQI",
"parentPublication": {
"id": "proceedings/icme/2004/8603/3",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1998/05/mcg1998050070",
"title": "Analyzing Facial Expressions for Virtual Conferencing",
"doi": null,
"abstractUrl": "/magazine/cg/1998/05/mcg1998050070/13rRUx0xPyO",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2019/3891/0/08925115",
"title": "An Open-Source Avatar for Real-Time Human-Agent Interaction Applications",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2019/08925115/1fHFgdi6V9u",
"parentPublication": {
"id": "proceedings/aciiw/2019/3891/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2021/3875/0/387500a362",
"title": "Multimodal Data Integration for Interactive and Realistic Avatar Simulation in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2021/387500a362/1yBGbf6wUIU",
"parentPublication": {
"id": "proceedings/iri/2021/3875/0",
"title": "2021 IEEE 22nd International Conference on Information Reuse and Integration for Data Science (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1tMztYH",
"doi": "10.1109/VR.2018.8446152",
"title": "Simulating Movement Interactions Between Avatars & Agents in Virtual Worlds Using Human Motion Constraints",
"normalizedTitle": "Simulating Movement Interactions Between Avatars & Agents in Virtual Worlds Using Human Motion Constraints",
"abstract": "We present an interactive algorithm to generate plausible movements for human-like agents interacting with other agents or avatars in a virtual environment. Our approach takes into account high-dimensional human motion constraints and bio-mechanical constraints to compute collision-free trajectories for each agent. We present a novel full-body movement constrained-velocity computation algorithm that can easily be combined with many existing motion synthesis techniques. Compared to prior local navigation methods, our formulation reduces artefacts that arise in dense scenarios and close interactions, and results in smoother and plausible locomotive behaviors. We have evaluated the benefits of our new algorithm in single-agent and multi-agent environments. We investigated the perception of a single agent's movements in dense scenarios and observed that our algorithm has a strong positive effect on the perceived quality of the simulation. Our approach also allows the user to interact with the agents from a first-person perspective in immersive settings. We conducted a study to investigate the perception of such avatar-agent interactions, and found that interactions generated using our approach lead to an increase in the user's sense of co-presence.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an interactive algorithm to generate plausible movements for human-like agents interacting with other agents or avatars in a virtual environment. Our approach takes into account high-dimensional human motion constraints and bio-mechanical constraints to compute collision-free trajectories for each agent. We present a novel full-body movement constrained-velocity computation algorithm that can easily be combined with many existing motion synthesis techniques. Compared to prior local navigation methods, our formulation reduces artefacts that arise in dense scenarios and close interactions, and results in smoother and plausible locomotive behaviors. We have evaluated the benefits of our new algorithm in single-agent and multi-agent environments. We investigated the perception of a single agent's movements in dense scenarios and observed that our algorithm has a strong positive effect on the perceived quality of the simulation. Our approach also allows the user to interact with the agents from a first-person perspective in immersive settings. We conducted a study to investigate the perception of such avatar-agent interactions, and found that interactions generated using our approach lead to an increase in the user's sense of co-presence.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an interactive algorithm to generate plausible movements for human-like agents interacting with other agents or avatars in a virtual environment. Our approach takes into account high-dimensional human motion constraints and bio-mechanical constraints to compute collision-free trajectories for each agent. We present a novel full-body movement constrained-velocity computation algorithm that can easily be combined with many existing motion synthesis techniques. Compared to prior local navigation methods, our formulation reduces artefacts that arise in dense scenarios and close interactions, and results in smoother and plausible locomotive behaviors. We have evaluated the benefits of our new algorithm in single-agent and multi-agent environments. We investigated the perception of a single agent's movements in dense scenarios and observed that our algorithm has a strong positive effect on the perceived quality of the simulation. Our approach also allows the user to interact with the agents from a first-person perspective in immersive settings. We conducted a study to investigate the perception of such avatar-agent interactions, and found that interactions generated using our approach lead to an increase in the user's sense of co-presence.",
"fno": "08446152",
"keywords": [
"Avatars",
"Computer Animation",
"Image Motion Analysis",
"Multi Agent Systems",
"Movement Interactions",
"Virtual Worlds",
"Interactive Algorithm",
"Plausible Movements",
"Virtual Environment",
"Bio Mechanical Constraints",
"Collision Free Trajectories",
"Full Body Movement Constrained Velocity Computation Algorithm",
"Dense Scenarios",
"Plausible Locomotive Behaviors",
"Single Agent",
"Multiagent Environments",
"Avatar Agent Interactions",
"High Dimensional Human Motion Constraints",
"Motion Synthesis Techniques",
"Local Navigation Methods",
"Human Motion Constraints",
"Two Dimensional Displays",
"Avatars",
"Trajectory",
"Navigation",
"Computational Modeling",
"Collision Avoidance",
"Solid Modeling",
"Multi Agent Simulation",
"Virtual Reality",
"Avatars",
"Human Agents",
"Interactive Navigation Human Centered Computing User Studies",
"Human Centered Computing Virtual Reality",
"Computing Methodologies Artificial Intelligence",
"Computing Methodologies Motion Path Planning",
"Computing Methodologies Modeling And Simulation"
],
"authors": [
{
"affiliation": "University of North Carolina, Chapel Hill",
"fullName": "Sahil Narang",
"givenName": "Sahil",
"surname": "Narang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina, Chapel Hill",
"fullName": "Andrew Best",
"givenName": "Andrew",
"surname": "Best",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina, Chapel Hill",
"fullName": "Dinesh Manocha",
"givenName": "Dinesh",
"surname": "Manocha",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "9-16",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446539",
"articleId": "13bd1h03qOe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446229",
"articleId": "13bd1fHrlRx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vs-games/2009/3588/0/3588a012",
"title": "Emotionally Responsive Robotic Avatars as Characters in Virtual Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2009/3588a012/12OmNqBbHEu",
"parentPublication": {
"id": "proceedings/vs-games/2009/3588/0",
"title": "Games and Virtual Worlds for Serious Applications, Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2012/0430/0/06386498",
"title": "Ensemble modeling and control for congestion management in automated warehouses",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386498/12OmNqzu6Uu",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130234",
"title": "Virtual Tawaf: A case study in simulating the behavior of dense, heterogeneous crowds",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130234/12OmNyS6RAO",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2014/6854/0/6854a103",
"title": "Crowd Simulation for Evacuation Behaviors Based on Multi-agent System and Cellular Automaton",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2014/6854a103/12OmNzZmZme",
"parentPublication": {
"id": "proceedings/icvrv/2014/6854/0",
"title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642370",
"title": "Inferring User Intent using Bayesian Theory of Mind in Shared Avatar-Agent Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642370/17PYElVL3mE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794519",
"title": "<italic>FVA:</italic> Modeling Perceived Friendliness of Virtual Agents Using Movement Characteristics",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794519/1cr2Z4zxQ9q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/03/08865441",
"title": "Heter-Sim: Heterogeneous Multi-Agent Systems Simulation by Interactive Data-Driven Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2021/03/08865441/1e2DgJkkm0E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089518",
"title": "SPA: Verbal Interactions between Agents and Avatars in Shared Virtual Environments using Propositional Planning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089518/1jIxcUvDJBu",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2020/7303/0/730300a001",
"title": "Comparison of Collision Avoidance Algorithms for Autonomous Multi-agent Systems",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2020/730300a001/1nkDgBsnh3q",
"parentPublication": {
"id": "proceedings/compsac/2020/7303/0",
"title": "2020 IEEE 44th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a140",
"title": "Exploring behaviour towards avatars and agents in immersive virtual environments with mixed-agency interactions",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a140/1tnXfmuYmli",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKisL",
"title": "2018 7th International Conference on Digital Home (ICDH)",
"acronym": "icdh",
"groupId": "1802037",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WK5Akh",
"doi": "10.1109/ICDH.2018.00064",
"title": "A Motion Retargeting Method with Footstep Constraints",
"normalizedTitle": "A Motion Retargeting Method with Footstep Constraints",
"abstract": "In the field of animation and virtualization, there are many existing motion retargeting methods while those methods are not widely applied to practical production. When we transfer a human motion to an avatar in practice, the foot distortion is most obvious. In this paper we present a motion retargeting method for 3D human body with footstep constraints. With the foot end-effector constraints, we solve the footstep slip problem and keep the feet on the ground. To obtain more reasonable results, we also do some smoothing processing with constraints. In this paper we present our experimental results on real captured motion data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the field of animation and virtualization, there are many existing motion retargeting methods while those methods are not widely applied to practical production. When we transfer a human motion to an avatar in practice, the foot distortion is most obvious. In this paper we present a motion retargeting method for 3D human body with footstep constraints. With the foot end-effector constraints, we solve the footstep slip problem and keep the feet on the ground. To obtain more reasonable results, we also do some smoothing processing with constraints. In this paper we present our experimental results on real captured motion data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the field of animation and virtualization, there are many existing motion retargeting methods while those methods are not widely applied to practical production. When we transfer a human motion to an avatar in practice, the foot distortion is most obvious. In this paper we present a motion retargeting method for 3D human body with footstep constraints. With the foot end-effector constraints, we solve the footstep slip problem and keep the feet on the ground. To obtain more reasonable results, we also do some smoothing processing with constraints. In this paper we present our experimental results on real captured motion data.",
"fno": "949700a329",
"keywords": [
"Avatars",
"Computer Animation",
"Virtualisation",
"Human Motion",
"Foot Distortion",
"Motion Retargeting Method",
"3 D Human Body",
"Footstep Constraints",
"Foot End Effector Constraints",
"Footstep Slip Problem",
"Animation",
"Virtualization",
"Voltage Control",
"Motion Retargeting",
"Inverse Kinematic",
"Motion Capture"
],
"authors": [
{
"affiliation": null,
"fullName": "Shaoshuai Xu",
"givenName": "Shaoshuai",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhixun Su",
"givenName": "Zhixun",
"surname": "Su",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xuan Wang",
"givenName": "Xuan",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdh",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-11-01T00:00:00",
"pubType": "proceedings",
"pages": "329-333",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-9497-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "949700a324",
"articleId": "17D45XDIXOb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "949700a335",
"articleId": "17D45X7VTfw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2011/4602/0/4602a247",
"title": "Foot Trajectory Kept Motion Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2011/4602a247/12OmNAXglUq",
"parentPublication": {
"id": "proceedings/icvrv/2011/4602/0",
"title": "2011 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2009/3789/0/3789a096",
"title": "A Motion Retargeting Method for Topologically Different Characters",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a096/12OmNqJq4hB",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dicta/2009/3866/0/3866a498",
"title": "Cartoon Motion Capturing and Retargeting by Rigid Shape Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2009/3866a498/12OmNwCJP03",
"parentPublication": {
"id": "proceedings/dicta/2009/3866/0",
"title": "2009 Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2010/6984/0/05540162",
"title": "Hybrid shift map for video retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2010/05540162/12OmNx0RIXh",
"parentPublication": {
"id": "proceedings/cvpr/2010/6984/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a080",
"title": "Motion Capture and Retargeting of Fish by Monocular Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a080/12OmNxXCGOn",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2007/2994/1/29940349",
"title": "Animation Generation and Retargeting Based on Physics Characteristics",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2007/29940349/12OmNyaGeJ9",
"parentPublication": {
"id": "iih-msp/2007/2994/1",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dicta/2008/3456/0/3456a320",
"title": "Multi-scale Cartoon Motion Capture and Retargeting without Shape Matching",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2008/3456a320/12OmNzcPAxp",
"parentPublication": {
"id": "proceedings/dicta/2008/3456/0",
"title": "2008 Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200j700",
"title": "Contact-Aware Retargeting of Skinned Motion",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200j700/1BmIsrlulzO",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a412",
"title": "Cross-identity Video Motion Retargeting with Joint Transformation and Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a412/1La4J01TyRa",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300j711",
"title": "Joint Face Detection and Facial Motion Retargeting for Multiple Faces",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300j711/1gyrrvF3hok",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmIsrlulzO",
"doi": "10.1109/ICCV48922.2021.00958",
"title": "Contact-Aware Retargeting of Skinned Motion",
"normalizedTitle": "Contact-Aware Retargeting of Skinned Motion",
"abstract": "This paper introduces a motion retargeting method that preserves self-contacts and prevents interpenetration. Self-contacts, such as when hands touch each other or the torso or the head, are important attributes of human body language and dynamics, yet existing methods do not model or preserve these contacts. Likewise, interpenetration, such as a hand passing into the torso, are a typical artifact of motion estimation methods. The input to our method is a human motion sequence and a target skeleton and character geometry. The method identifies self-contacts and ground contacts in the input motion, and optimizes the motion to apply to the output skeleton, while preserving these contacts and reducing interpenetration. We introduce a novel geometry-conditioned recurrent network with an encoder-space optimization strategy that achieves efficient retargeting while satisfying contact constraints. In experiments, our results quantitatively outperform previous methods and we conduct a user study where our retargeted motions are rated as higher-quality than those produced by recent works. We also show our method generalizes to motion estimated from human videos where we improve over previous works that produce noticeable interpenetration.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a motion retargeting method that preserves self-contacts and prevents interpenetration. Self-contacts, such as when hands touch each other or the torso or the head, are important attributes of human body language and dynamics, yet existing methods do not model or preserve these contacts. Likewise, interpenetration, such as a hand passing into the torso, are a typical artifact of motion estimation methods. The input to our method is a human motion sequence and a target skeleton and character geometry. The method identifies self-contacts and ground contacts in the input motion, and optimizes the motion to apply to the output skeleton, while preserving these contacts and reducing interpenetration. We introduce a novel geometry-conditioned recurrent network with an encoder-space optimization strategy that achieves efficient retargeting while satisfying contact constraints. In experiments, our results quantitatively outperform previous methods and we conduct a user study where our retargeted motions are rated as higher-quality than those produced by recent works. We also show our method generalizes to motion estimated from human videos where we improve over previous works that produce noticeable interpenetration.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a motion retargeting method that preserves self-contacts and prevents interpenetration. Self-contacts, such as when hands touch each other or the torso or the head, are important attributes of human body language and dynamics, yet existing methods do not model or preserve these contacts. Likewise, interpenetration, such as a hand passing into the torso, are a typical artifact of motion estimation methods. The input to our method is a human motion sequence and a target skeleton and character geometry. The method identifies self-contacts and ground contacts in the input motion, and optimizes the motion to apply to the output skeleton, while preserving these contacts and reducing interpenetration. We introduce a novel geometry-conditioned recurrent network with an encoder-space optimization strategy that achieves efficient retargeting while satisfying contact constraints. In experiments, our results quantitatively outperform previous methods and we conduct a user study where our retargeted motions are rated as higher-quality than those produced by recent works. We also show our method generalizes to motion estimated from human videos where we improve over previous works that produce noticeable interpenetration.",
"fno": "281200j700",
"keywords": [
"Torso",
"Geometry",
"Recurrent Neural Networks",
"Shape",
"Motion Estimation",
"Skeleton",
"Encoding",
"Motion And Tracking",
"Gestures And Body Pose"
],
"authors": [
{
"affiliation": "Adobe Research",
"fullName": "Ruben Villegas",
"givenName": "Ruben",
"surname": "Villegas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Duygu Ceylan",
"givenName": "Duygu",
"surname": "Ceylan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Aaron Hertzmann",
"givenName": "Aaron",
"surname": "Hertzmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Jimei Yang",
"givenName": "Jimei",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Jun Saito",
"givenName": "Jun",
"surname": "Saito",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "9700-9709",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200j690",
"articleId": "1BmH5OQpCH6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200j710",
"articleId": "1BmLin8Zd4s",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2011/4602/0/4602a247",
"title": "Foot Trajectory Kept Motion Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2011/4602a247/12OmNAXglUq",
"parentPublication": {
"id": "proceedings/icvrv/2011/4602/0",
"title": "2011 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dicta/2011/4588/0/4588a480",
"title": "Graph Rigidity for Near-Coplanar Structure from Motion",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2011/4588a480/12OmNC3Xhnn",
"parentPublication": {
"id": "proceedings/dicta/2011/4588/0",
"title": "2011 International Conference on Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2009/3789/0/3789a096",
"title": "A Motion Retargeting Method for Topologically Different Characters",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a096/12OmNqJq4hB",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a080",
"title": "Motion Capture and Retargeting of Fish by Monocular Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a080/12OmNxXCGOn",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012025",
"title": "Coarse-to-fine temporal optimization for video retargeting based on seam carving",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012025/12OmNyqRnmI",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dicta/2008/3456/0/3456a320",
"title": "Multi-scale Cartoon Motion Capture and Retargeting without Shape Matching",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2008/3456a320/12OmNzcPAxp",
"parentPublication": {
"id": "proceedings/dicta/2008/3456/0",
"title": "2008 Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101677",
"title": "Content-Aware Video Retargeting Using Object-Preserving Warping",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101677/13rRUxASuMC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2018/9497/0/949700a329",
"title": "A Motion Retargeting Method with Footstep Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2018/949700a329/17D45WK5Akh",
"parentPublication": {
"id": "proceedings/icdh/2018/9497/0",
"title": "2018 7th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a625",
"title": "Shape Aware Haptic Retargeting for Accurate Hand Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a625/1CJcacl0uhq",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f305",
"title": "TransMoMo: Invariance-Driven Unsupervised Video Motion Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f305/1m3nImofZWU",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1KYslFwrlyE",
"title": "2022 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "10044366",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1KYsovTRTC8",
"doi": "10.1109/3DV57658.2022.00032",
"title": "HVTR: Hybrid Volumetric-Textural Rendering for Human Avatars",
"normalizedTitle": "HVTR: Hybrid Volumetric-Textural Rendering for Human Avatars",
"abstract": "We propose a novel neural rendering pipeline, Hybrid Volumetric-Textural Rendering (HVTR), which synthesizes virtual human avatars from arbitrary poses efficiently and at high quality. First, we learn to encode articulated human motions on a dense UV manifold of the human body surface. To handle complicated motions (e.g., self-occlusions), we then leverage the encoded information on the UV manifold to construct a 3D volumetric representation based on a dynamic pose-conditioned neural radiance field. While this allows us to represent 3D geometry with changing topology, volumetric rendering is computationally heavy. Hence we employ only a rough volumetric representation using a pose-conditioned downsampled neural radiance field (PD-NeRF), which we can render efficiently at low resolutions. In addition, we learn 2D textural features that are fused with rendered volumetric features in image space. The key advantage of our approach is that we can then convert the fused features into a high-resolution, high-quality avatar by a fast GAN-based textural renderer. We demonstrate that hybrid rendering enables HVTR to handle complicated motions, render high-quality avatars under user-controlled poses/shapes and even loose clothing, and most importantly, be efficient at inference time. Our experimental results also demonstrate state-of-the-art quantitative results. More results are available at our project page: https://www.cs.umd.edu/~taohu/hvtr/",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel neural rendering pipeline, Hybrid Volumetric-Textural Rendering (HVTR), which synthesizes virtual human avatars from arbitrary poses efficiently and at high quality. First, we learn to encode articulated human motions on a dense UV manifold of the human body surface. To handle complicated motions (e.g., self-occlusions), we then leverage the encoded information on the UV manifold to construct a 3D volumetric representation based on a dynamic pose-conditioned neural radiance field. While this allows us to represent 3D geometry with changing topology, volumetric rendering is computationally heavy. Hence we employ only a rough volumetric representation using a pose-conditioned downsampled neural radiance field (PD-NeRF), which we can render efficiently at low resolutions. In addition, we learn 2D textural features that are fused with rendered volumetric features in image space. The key advantage of our approach is that we can then convert the fused features into a high-resolution, high-quality avatar by a fast GAN-based textural renderer. We demonstrate that hybrid rendering enables HVTR to handle complicated motions, render high-quality avatars under user-controlled poses/shapes and even loose clothing, and most importantly, be efficient at inference time. Our experimental results also demonstrate state-of-the-art quantitative results. More results are available at our project page: https://www.cs.umd.edu/~taohu/hvtr/",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel neural rendering pipeline, Hybrid Volumetric-Textural Rendering (HVTR), which synthesizes virtual human avatars from arbitrary poses efficiently and at high quality. First, we learn to encode articulated human motions on a dense UV manifold of the human body surface. To handle complicated motions (e.g., self-occlusions), we then leverage the encoded information on the UV manifold to construct a 3D volumetric representation based on a dynamic pose-conditioned neural radiance field. While this allows us to represent 3D geometry with changing topology, volumetric rendering is computationally heavy. Hence we employ only a rough volumetric representation using a pose-conditioned downsampled neural radiance field (PD-NeRF), which we can render efficiently at low resolutions. In addition, we learn 2D textural features that are fused with rendered volumetric features in image space. The key advantage of our approach is that we can then convert the fused features into a high-resolution, high-quality avatar by a fast GAN-based textural renderer. We demonstrate that hybrid rendering enables HVTR to handle complicated motions, render high-quality avatars under user-controlled poses/shapes and even loose clothing, and most importantly, be efficient at inference time. Our experimental results also demonstrate state-of-the-art quantitative results. More results are available at our project page: https://www.cs.umd.edu/~taohu/hvtr/",
"fno": "567000a197",
"keywords": [
"Avatars",
"Feature Extraction",
"Image Representation",
"Image Texture",
"Rendering Computer Graphics",
"Solid Modelling",
"2 D Textural Features",
"3 D Volumetric Representation",
"Arbitrary Poses",
"Articulated Human Motions",
"Complicated Motions",
"Dense UV Manifold",
"Downsampled Neural Radiance Field",
"Even Loose Clothing",
"Fast GAN Based Textural Renderer",
"High Quality Avatar",
"Human Body Surface",
"HVTR",
"Hybrid Rendering",
"Hybrid Volumetric Textural Rendering",
"Novel Neural Rendering Pipeline",
"Rendered Volumetric Features",
"Rough Volumetric Representation",
"Virtual Human Avatars",
"Volumetric Rendering",
"Manifolds",
"Geometry",
"Three Dimensional Displays",
"Image Resolution",
"Avatars",
"Pipelines",
"Dynamics",
"Video Based Characters",
"Neural Rendering",
"Full Body Avatar"
],
"authors": [
{
"affiliation": "University of Maryland,College Park",
"fullName": "Tao Hu",
"givenName": "Tao",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University",
"fullName": "Tao Yu",
"givenName": "Tao",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University",
"fullName": "Zerong Zheng",
"givenName": "Zerong",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University",
"fullName": "He Zhang",
"givenName": "He",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University",
"fullName": "Yebin Liu",
"givenName": "Yebin",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Maryland,College Park",
"fullName": "Matthias Zwicker",
"givenName": "Matthias",
"surname": "Zwicker",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-09-01T00:00:00",
"pubType": "proceedings",
"pages": "197-208",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5670-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "567000a187",
"articleId": "1KYswaNUU4o",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "567000a209",
"articleId": "1KYsxOml5lu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2008/1971/0/04480772",
"title": "New Rendering Approach for Composable Volumetric Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480772/12OmNBAqZId",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vg/2005/26/0/01500528",
"title": "Volumetric reconstruction, compression and rendering of natural phenomena from multi-video data",
"doi": null,
"abstractUrl": "/proceedings-article/vg/2005/01500528/12OmNxvO08M",
"parentPublication": {
"id": "proceedings/vg/2005/26/0",
"title": "Volume Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/01/v0055",
"title": "Modeling, Animating, and Rendering Complex Scenes Using Volumetric Textures",
"doi": null,
"abstractUrl": "/journal/tg/1998/01/v0055/13rRUxly95q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1990/02/mcg1990020024",
"title": "Volumetric Rendering",
"doi": null,
"abstractUrl": "/magazine/cg/1990/02/mcg1990020024/13rRUy08Myt",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4508",
"title": "EgoRenderer: Rendering Human Avatars from Egocentric Camera Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4508/1BmJxzOtk4w",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g145",
"title": "NeuralHOFusion: Neural Volumetric Rendering under Human-object Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g145/1H1itCwY51e",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/08918030",
"title": "Volumetric Isosurface Rendering with Deep Learning-Based Super-Resolution",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/08918030/1fm1QUuzRAI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089612",
"title": "Effects of volumetric capture avatars on social presence in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089612/1jIxdAmCCJi",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1728",
"title": "Pixel-aligned Volumetric Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1728/1yeHX163Xnq",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h115",
"title": "NeuTex: Neural Texture Mapping for Volumetric Neural Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h115/1yeLdyIKnV6",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxlFindEk",
"doi": "10.1109/VRW50115.2020.00199",
"title": "MotionNote: A Novel Human Pose Representation",
"normalizedTitle": "MotionNote: A Novel Human Pose Representation",
"abstract": "Three-dimensional (3D) avatar/humanoid models and their motions are extensively used in robotics, human-computer interaction, digital entertainment industry, fitness training, rehabilitation, animation and virtual reality (VR). This paper presents ongoing research work on developing a novel motion notation approach called MotionNote for a pose of human bone, like Labanotation and Musical notes are used for dance and musical sound. The representation of motion using MotionNote includes motion data capture and reconstruction on avatar/humanoid model, motion visualization and representation on a unit sphere, and finally motion notation on a two-dimensional (2D) equirectangular perspective grid (EPG). Our preliminary results show that MotionNote is feasible for understanding human motion using the motion notation on 2D EPG.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Three-dimensional (3D) avatar/humanoid models and their motions are extensively used in robotics, human-computer interaction, digital entertainment industry, fitness training, rehabilitation, animation and virtual reality (VR). This paper presents ongoing research work on developing a novel motion notation approach called MotionNote for a pose of human bone, like Labanotation and Musical notes are used for dance and musical sound. The representation of motion using MotionNote includes motion data capture and reconstruction on avatar/humanoid model, motion visualization and representation on a unit sphere, and finally motion notation on a two-dimensional (2D) equirectangular perspective grid (EPG). Our preliminary results show that MotionNote is feasible for understanding human motion using the motion notation on 2D EPG.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Three-dimensional (3D) avatar/humanoid models and their motions are extensively used in robotics, human-computer interaction, digital entertainment industry, fitness training, rehabilitation, animation and virtual reality (VR). This paper presents ongoing research work on developing a novel motion notation approach called MotionNote for a pose of human bone, like Labanotation and Musical notes are used for dance and musical sound. The representation of motion using MotionNote includes motion data capture and reconstruction on avatar/humanoid model, motion visualization and representation on a unit sphere, and finally motion notation on a two-dimensional (2D) equirectangular perspective grid (EPG). Our preliminary results show that MotionNote is feasible for understanding human motion using the motion notation on 2D EPG.",
"fno": "09090443",
"keywords": [
"Trajectory",
"Two Dimensional Displays",
"Music",
"Bones",
"Data Visualization",
"Avatars",
"Conferences",
"Computing Methodologies",
"Computer Graphics",
"Animation",
"Motion Capture",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Techniques",
"Gestural Input"
],
"authors": [
{
"affiliation": "Chung-Ang University,Seoul,Republic of Korea",
"fullName": "Dubeom Kim",
"givenName": "Dubeom",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chung-Ang University,Seoul,Republic of Korea",
"fullName": "Bharatesh Chakravarthi",
"givenName": "Bharatesh",
"surname": "Chakravarthi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chung-Ang University,Seoul,Republic of Korea",
"fullName": "Seong Hun Kim",
"givenName": "Seong Hun",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chung-Ang University,Seoul,Republic of Korea",
"fullName": "Adithya Balasubramanyam",
"givenName": "Adithya",
"surname": "Balasubramanyam",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chung-Ang University,Seoul,Republic of Korea",
"fullName": "Young Ho Chai",
"givenName": "Young Ho",
"surname": "Chai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chung-Ang University,Seoul,Republic of Korea",
"fullName": "Ashok Kumar Patil",
"givenName": "Ashok Kumar",
"surname": "Patil",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "696-697",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090562",
"articleId": "1jIxxU9xtu0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090549",
"articleId": "1jIxttGQGKk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ithings/2014/5967/0/5967a352",
"title": "Scarecrow: Avatar Representation Using Biological Information Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ithings/2014/5967a352/12OmNCcKQP0",
"parentPublication": {
"id": "proceedings/ithings/2014/5967/0",
"title": "2014 IEEE International Conference on Internet of Things(iThings), and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476613",
"title": "Poster: Authoring Tool for Intuitive Editing of Avatar Pose Using a Virtual Puppet",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476613/12OmNqH9hes",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504775",
"title": "Evaluation of the effect of a virtual avatar's representation on distance perception in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504775/12OmNwpXROu",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284819",
"title": "Real-Time Humanoid Avatar for Multimodal Human-Machine Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284819/12OmNzkMlMm",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446152",
"title": "Simulating Movement Interactions Between Avatars & Agents in Virtual Worlds Using Human Motion Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446152/13bd1tMztYH",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040583",
"title": "Human Tails: Ownership and Control of Extended Humanoid Avatars",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040583/13rRUxYrbUF",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1997/02/mcg1997020042",
"title": "Virtual Human Representation and Communication in VLNet",
"doi": null,
"abstractUrl": "/magazine/cg/1997/02/mcg1997020042/13rRUyp7tYZ",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523447",
"title": "Retargeting Human-Object Interaction to Virtual Avatars",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523447/13rRUzp02ot",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a237",
"title": "Integrating Biomechanical and Animation Motion Capture Methods in the Production of Participant Specific, Scaled Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a237/17D45XeKgqk",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a274",
"title": "Inverse Kinematics and Temporal Convolutional Networks for Sequential Pose Analysis in VR",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a274/1qpzAz62YQ8",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAYXWAF",
"title": "2016 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBOCWvM",
"doi": "10.1109/VR.2016.7504726",
"title": "Redirected head gaze to support AR meetings distributed over heterogeneous environments",
"normalizedTitle": "Redirected head gaze to support AR meetings distributed over heterogeneous environments",
"abstract": "We demonstrate a method for redirecting gaze of virtual avatars in distributed augmented reality (AR) meetings. As social cues are a necessity for effective communication, our method tries to preserve gaze awareness, one of the key elements of a face-to-face meeting. When using AR to bring multiple sites together in a distributed meeting, with different numbers of participants and physical arrangements across sites, gaze awareness is maintained regardless of the seating topology. By maintaining gaze, we hope to enhance the presence of remote attendees and improve communication among the users, making meetings in AR a practical option for teleconferencing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We demonstrate a method for redirecting gaze of virtual avatars in distributed augmented reality (AR) meetings. As social cues are a necessity for effective communication, our method tries to preserve gaze awareness, one of the key elements of a face-to-face meeting. When using AR to bring multiple sites together in a distributed meeting, with different numbers of participants and physical arrangements across sites, gaze awareness is maintained regardless of the seating topology. By maintaining gaze, we hope to enhance the presence of remote attendees and improve communication among the users, making meetings in AR a practical option for teleconferencing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We demonstrate a method for redirecting gaze of virtual avatars in distributed augmented reality (AR) meetings. As social cues are a necessity for effective communication, our method tries to preserve gaze awareness, one of the key elements of a face-to-face meeting. When using AR to bring multiple sites together in a distributed meeting, with different numbers of participants and physical arrangements across sites, gaze awareness is maintained regardless of the seating topology. By maintaining gaze, we hope to enhance the presence of remote attendees and improve communication among the users, making meetings in AR a practical option for teleconferencing.",
"fno": "07504726",
"keywords": [
"Augmented Reality",
"Telecommunication Computing",
"Teleconferencing",
"Seating Topology",
"Face To Face Meeting",
"Gaze Awareness Preservation",
"Social Cues",
"Heterogeneous Environment",
"Distributed Augmented Reality Meetings",
"Virtual Avatars",
"Gaze Redirecting Method",
"AR Meetings",
"Redirected Head Gaze",
"Avatars",
"Teleconferencing",
"Head",
"Topology",
"Prototypes",
"Human Factors",
"Augmented Reality Teleconferencing",
"Computer Supported Collaborative Work"
],
"authors": [
{
"affiliation": "School of Interactive Computing, Georgia Institute of Technology, Atlanta, GA, USA",
"fullName": "Taeheon Kim",
"givenName": "Taeheon",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Interactive Computing, Georgia Institute of Technology, Atlanta, GA, USA",
"fullName": "Ashwin Kachhara",
"givenName": "Ashwin",
"surname": "Kachhara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Interactive Computing, Georgia Institute of Technology, Atlanta, GA, USA",
"fullName": "Blair Maclntyre",
"givenName": "Blair",
"surname": "Maclntyre",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "207-208",
"year": "2016",
"issn": "2375-5334",
"isbn": "978-1-5090-0836-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07504725",
"articleId": "12OmNC8uRtR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07504727",
"articleId": "12OmNwpGgKa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icat/2007/3056/0/30560280",
"title": "Interaction Without Gesture or Speech -- A Gaze Controlled AR System",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2007/30560280/12OmNCcKQtv",
"parentPublication": {
"id": "proceedings/icat/2007/3056/0",
"title": "17th International Conference on Artificial Reality and Telexistence (ICAT 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/1994/5090/4/00323502",
"title": "Empirical CATeam research in meetings",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/1994/00323502/12OmNCcbEj6",
"parentPublication": {
"id": "proceedings/hicss/1994/5090/4",
"title": "Proceedings of the Twenty-Seventh Annual Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/1994/5090/4/00323473",
"title": "Using group support systems to improve the New Zealand economy. II. Follow-up results",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/1994/00323473/12OmNvA1h4y",
"parentPublication": {
"id": "proceedings/hicss/1994/5090/4",
"title": "Proceedings of the Twenty-Seventh Annual Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549379",
"title": "Head motion animation using avatar gaze space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c237",
"title": "Light-Weight Head Pose Invariant Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c237/17D45WXIkI8",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699286",
"title": "Effects of Hybrid and Synthetic Social Gaze in Avatar-Mediated Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699286/19F1VntaVYQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a333",
"title": "Local Free-View Neural 3D Head Synthesis for Virtual Group Meetings",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a333/1CJdX2rYlfW",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089433",
"title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089433/1jIxf3ZEs0w",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a159",
"title": "Exploring the Effect of Visual Cues on Eye Gaze During AR-Guided Picking and Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a159/1yeQM18rD7G",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwcl7Jy",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"acronym": "icinis",
"groupId": "1002524",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs4S8I4",
"doi": "10.1109/ICINIS.2010.29",
"title": "Implementation and Optimization of the Eye Gaze Tracking System Based on DM642",
"normalizedTitle": "Implementation and Optimization of the Eye Gaze Tracking System Based on DM642",
"abstract": "Real-time eye detection is important for many HCI applications, including eye gaze tracking, auto stereoscopic displays, video conferencing, face detection, and recognition. Currently, the eye gaze tracking systems are usually implemented on general purpose processors. As eye gaze tracking algorithms move from research labs to the real world, power consumption and cost become critical issues. This motivates searching for implementations using a digital signal processor (DSP). Our goal in this paper is to explore the feasibility of implementing DSP-based eye gaze tracking systems. To achieve this goal, we implement a fully automatic eye gaze tracking system on Texas Instruments’ TMS320DM642 DSP, profile performance, and analyze opportunities for optimization. Our experiments demonstrate that well-optimized eye gaze tracking implementations on DSP can be an effective choice for embedded eye gaze tracking products.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Real-time eye detection is important for many HCI applications, including eye gaze tracking, auto stereoscopic displays, video conferencing, face detection, and recognition. Currently, the eye gaze tracking systems are usually implemented on general purpose processors. As eye gaze tracking algorithms move from research labs to the real world, power consumption and cost become critical issues. This motivates searching for implementations using a digital signal processor (DSP). Our goal in this paper is to explore the feasibility of implementing DSP-based eye gaze tracking systems. To achieve this goal, we implement a fully automatic eye gaze tracking system on Texas Instruments’ TMS320DM642 DSP, profile performance, and analyze opportunities for optimization. Our experiments demonstrate that well-optimized eye gaze tracking implementations on DSP can be an effective choice for embedded eye gaze tracking products.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Real-time eye detection is important for many HCI applications, including eye gaze tracking, auto stereoscopic displays, video conferencing, face detection, and recognition. Currently, the eye gaze tracking systems are usually implemented on general purpose processors. As eye gaze tracking algorithms move from research labs to the real world, power consumption and cost become critical issues. This motivates searching for implementations using a digital signal processor (DSP). Our goal in this paper is to explore the feasibility of implementing DSP-based eye gaze tracking systems. To achieve this goal, we implement a fully automatic eye gaze tracking system on Texas Instruments’ TMS320DM642 DSP, profile performance, and analyze opportunities for optimization. Our experiments demonstrate that well-optimized eye gaze tracking implementations on DSP can be an effective choice for embedded eye gaze tracking products.",
"fno": "4249a048",
"keywords": [
"Eye Gaze Tracking System",
"DM 642"
],
"authors": [
{
"affiliation": null,
"fullName": "Ruian Liu",
"givenName": "Ruian",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shengtao Ma",
"givenName": "Shengtao",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mimi Zhang",
"givenName": "Mimi",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lei Wang",
"givenName": "Lei",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icinis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-11-01T00:00:00",
"pubType": "proceedings",
"pages": "48-51",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4249-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4249a043",
"articleId": "12OmNBQkx67",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4249a052",
"articleId": "12OmNz5s0SA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/gcis/2009/3571/2/3571b133",
"title": "Key Techniques of Eye Gaze Tracking Based on Pupil Corneal Reflection",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571b133/12OmNA0vo1q",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/2",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995675",
"title": "Probabilistic gaze estimation without active personal calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995675/12OmNC8MsAV",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b003",
"title": "Real Time Eye Gaze Tracking with 3D Deformable Eye-Face Model",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b003/12OmNwNeYAV",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2011/1451/0/06000327",
"title": "Gaze tracking as a game input interface",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2011/06000327/12OmNxRWIeo",
"parentPublication": {
"id": "proceedings/cgames/2011/1451/0",
"title": "2011 16th International Conference on Computer Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2002/1602/0/16020101",
"title": "Non-Contact Eye Gaze Tracking System by Mapping of Corneal Reflections",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2002/16020101/12OmNzgwmIY",
"parentPublication": {
"id": "proceedings/fg/2002/1602/0",
"title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wkdd/2009/3543/0/3543a594",
"title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn",
"parentPublication": {
"id": "proceedings/wkdd/2009/3543/0",
"title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. WKDD 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2010/03/ttp2010030478",
"title": "In the Eye of the Beholder: A Survey of Models for Eyes and Gaze",
"doi": null,
"abstractUrl": "/journal/tp/2010/03/ttp2010030478/13rRUxOdD9o",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a399",
"title": "Real-Time Gaze Tracking with Event-Driven Eye Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a399/1CJbTrAdAju",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a594",
"title": "High-speed Gaze-oriented Projection by Cross-ratio-based Eye Tracking with Dual Infrared Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a594/1CJewqWywOk",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09389490",
"title": "Event-Based Near-Eye Gaze Tracking Beyond 10,000 Hz",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09389490/1smZT5W55V6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvRU0cM",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyQYt9o",
"doi": "10.1109/ISMAR-Adjunct.2017.36",
"title": "[POSTER] Mutually Shared Gaze in Augmented Video Conference",
"normalizedTitle": "[POSTER] Mutually Shared Gaze in Augmented Video Conference",
"abstract": "Augmenting video conference with additional visual cues has been studied to improve remote collaboration. A common setup is a person wearing a head-mounted display (HMD) and camera sharing her view of the workspace with a remote collaborator and getting assistance on a real-world task. While this configuration has been extensively studied, there has been little research on how sharing gaze cues might affect the collaboration. This research investigates how sharing gaze in both directions between a local worker and remote helper affects the collaboration and communication. We developed a prototype system that shares the eye gaze of both users, and conducted a user study. Preliminary results showed that sharing gaze significantly improves the awareness of each other's focus, hence improving collaboration.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmenting video conference with additional visual cues has been studied to improve remote collaboration. A common setup is a person wearing a head-mounted display (HMD) and camera sharing her view of the workspace with a remote collaborator and getting assistance on a real-world task. While this configuration has been extensively studied, there has been little research on how sharing gaze cues might affect the collaboration. This research investigates how sharing gaze in both directions between a local worker and remote helper affects the collaboration and communication. We developed a prototype system that shares the eye gaze of both users, and conducted a user study. Preliminary results showed that sharing gaze significantly improves the awareness of each other's focus, hence improving collaboration.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmenting video conference with additional visual cues has been studied to improve remote collaboration. A common setup is a person wearing a head-mounted display (HMD) and camera sharing her view of the workspace with a remote collaborator and getting assistance on a real-world task. While this configuration has been extensively studied, there has been little research on how sharing gaze cues might affect the collaboration. This research investigates how sharing gaze in both directions between a local worker and remote helper affects the collaboration and communication. We developed a prototype system that shares the eye gaze of both users, and conducted a user study. Preliminary results showed that sharing gaze significantly improves the awareness of each other's focus, hence improving collaboration.",
"fno": "6327a079",
"keywords": [
"Collaboration",
"Prototypes",
"Visualization",
"Resists",
"Mice",
"Cameras",
"Remote Collaboration",
"Augmented Video Conference",
"Eye Gaze Tracking"
],
"authors": [
{
"affiliation": null,
"fullName": "Gun Lee",
"givenName": "Gun",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Seungwon Kim",
"givenName": "Seungwon",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Youngho Lee",
"givenName": "Youngho",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Arindam Dey",
"givenName": "Arindam",
"surname": "Dey",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Thammatip Piumsomboon",
"givenName": "Thammatip",
"surname": "Piumsomboon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mitchell Norman",
"givenName": "Mitchell",
"surname": "Norman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "79-80",
"year": "2017",
"issn": null,
"isbn": "978-0-7695-6327-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6327a075",
"articleId": "12OmNzC5Tor",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6327a081",
"articleId": "12OmNyuPL0u",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948461",
"title": "[Poster] Social panoramas using wearable computers",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948461/12OmNB0nWbG",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948412",
"title": "Improving co-presence with augmented visual communication cues for sharing experience through video conference",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948412/12OmNwudQT3",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a218",
"title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523400",
"title": "Do You See What I See? The Effect of Gaze Tracking on Task Space Remote Collaboration",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523400/13rRUy0HYRu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a250",
"title": "Using Speech to Visualise Shared Gaze Cues in MR Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a250/1CJcnpSVomk",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798024",
"title": "Head Pointer or Eye Gaze: Which Helps More in MR Remote Collaboration?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798024/1cJ0MmguvG8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798128",
"title": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798128/1cJ1aXJnUyI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a393",
"title": "Wearable RemoteFusion: A Mixed Reality Remote Collaboration System with Local Eye Gaze and Remote Hand Gesture Sharing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a393/1gysjIlsYus",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a022",
"title": "Merging Live and Static 360 Panoramas Inside a 3D Scene for Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a022/1gysn0YPLm8",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a473",
"title": "The Impact of Gaze Cues in Mixed Reality Collaborations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a473/1yeQCejb7Co",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13xI8A66zF1",
"title": "2017 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"acronym": "aipr",
"groupId": "1000046",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "13xI8AAc3rr",
"doi": "10.1109/AIPR.2017.8457962",
"title": "Gaze Tracking in 3D Space with a Convolution Neural Network “See What I See”",
"normalizedTitle": "Gaze Tracking in 3D Space with a Convolution Neural Network “See What I See”",
"abstract": "This paper presents integrated architecture to estimate gaze vectors under unrestricted head motions. Since previous approaches focused on estimating gaze toward a small planar screen, calibration is needed prior to use. With a Kinect device, we develop a method that relies on depth sensing to obtain robust and accurate head pose tracking and obtain the eye-in-head gaze direction information by training the visual data from eye images with a Neural Network (NN) model. Our model uses a Convolution Neural Network (CNN) that has five layers: two sets of convolution-pooling pairs and a fully connected-output layer. The filters are taken from the random patches of the images in an unsupervised way by k-means clustering. The learned filters are fed to a convolution layer, each of which is followed by a pooling layer, to reduce the resolution of the feature map and the sensitivity of the output to the shifts and the distortions. In the end, fully connected layers can be used as a classifier with a feed-forward-based process to obtain the weight. We reconstruct the gaze vectors from a set of head and eye pose orientations. The results of this approach suggest that the gaze estimation error is 5 degrees. This model is more accurate than a simple NN and an adaptive linear regression (ALR) approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents integrated architecture to estimate gaze vectors under unrestricted head motions. Since previous approaches focused on estimating gaze toward a small planar screen, calibration is needed prior to use. With a Kinect device, we develop a method that relies on depth sensing to obtain robust and accurate head pose tracking and obtain the eye-in-head gaze direction information by training the visual data from eye images with a Neural Network (NN) model. Our model uses a Convolution Neural Network (CNN) that has five layers: two sets of convolution-pooling pairs and a fully connected-output layer. The filters are taken from the random patches of the images in an unsupervised way by k-means clustering. The learned filters are fed to a convolution layer, each of which is followed by a pooling layer, to reduce the resolution of the feature map and the sensitivity of the output to the shifts and the distortions. In the end, fully connected layers can be used as a classifier with a feed-forward-based process to obtain the weight. We reconstruct the gaze vectors from a set of head and eye pose orientations. The results of this approach suggest that the gaze estimation error is 5 degrees. This model is more accurate than a simple NN and an adaptive linear regression (ALR) approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents integrated architecture to estimate gaze vectors under unrestricted head motions. Since previous approaches focused on estimating gaze toward a small planar screen, calibration is needed prior to use. With a Kinect device, we develop a method that relies on depth sensing to obtain robust and accurate head pose tracking and obtain the eye-in-head gaze direction information by training the visual data from eye images with a Neural Network (NN) model. Our model uses a Convolution Neural Network (CNN) that has five layers: two sets of convolution-pooling pairs and a fully connected-output layer. The filters are taken from the random patches of the images in an unsupervised way by k-means clustering. The learned filters are fed to a convolution layer, each of which is followed by a pooling layer, to reduce the resolution of the feature map and the sensitivity of the output to the shifts and the distortions. In the end, fully connected layers can be used as a classifier with a feed-forward-based process to obtain the weight. We reconstruct the gaze vectors from a set of head and eye pose orientations. The results of this approach suggest that the gaze estimation error is 5 degrees. This model is more accurate than a simple NN and an adaptive linear regression (ALR) approach.",
"fno": "08457962",
"keywords": [
"Convolution",
"Head",
"Gaze Tracking",
"Three Dimensional Displays",
"Feature Extraction",
"Biological Neural Networks"
],
"authors": [
{
"affiliation": "Department of Mechanical and Bioengineering, Osaka University, Osaka, Japan",
"fullName": "Amalia I Adiba",
"givenName": "Amalia I",
"surname": "Adiba",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechanical and Bioengineering, Osaka University, Osaka, Japan",
"fullName": "Satoshi Asatani",
"givenName": "Satoshi",
"surname": "Asatani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechanical and Bioengineering, Osaka University, Osaka, Japan",
"fullName": "Seiichi Tagawa",
"givenName": "Seiichi",
"surname": "Tagawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechanical and Bioengineering, Osaka University, Osaka, Japan",
"fullName": "Hirohiko Niioka",
"givenName": "Hirohiko",
"surname": "Niioka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechanical and Bioengineering, Osaka University, Osaka, Japan",
"fullName": "Jun Miyake",
"givenName": "Jun",
"surname": "Miyake",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aipr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2017",
"issn": "2332-5615",
"isbn": "978-1-5386-1235-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "08457973",
"articleId": "13xI8ALy0Ea",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032d162",
"title": "Monocular Free-Head 3D Gaze Tracking with Deep Learning and Geometry Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d162/12OmNxbmSBT",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d870",
"title": "Visual Gaze Estimation by Joint Head and Eye Information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d870/12OmNyRg4Cq",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523400",
"title": "Do You See What I See? The Effect of Gaze Tracking on Task Space Remote Collaboration",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523400/13rRUy0HYRu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c237",
"title": "Light-Weight Head Pose Invariant Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c237/17D45WXIkI8",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a375",
"title": "Neural 3D Gaze: 3D Pupil Localization and Gaze Tracking based on Anatomical Eye Model and Neural Refraction Correction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a375/1JrQRCijhMk",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2019/9151/0/08730846",
"title": "Gaze Estimation Using Residual Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2019/08730846/1aDSMwUBvBS",
"parentPublication": {
"id": "proceedings/percom-workshops/2019/9151/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998375",
"title": "DGaze: CNN-Based Gaze Prediction in Dynamic Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998375/1hpPBdSWXTi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a554",
"title": "Estimating Gaze From Head and Hand Pose and Scene Images for Open-Ended Exploration in VR Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a554/1tnY5akLwvS",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKiru",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WXIkI8",
"doi": "10.1109/CVPRW.2018.00290",
"title": "Light-Weight Head Pose Invariant Gaze Tracking",
"normalizedTitle": "Light-Weight Head Pose Invariant Gaze Tracking",
"abstract": "Unconstrained remote gaze tracking using off-the-shelf cameras is a challenging problem. Recently, promising algorithms for appearance-based gaze estimation using convolutional neural networks (CNN) have been proposed. Improving their robustness to various confounding factors including variable head pose, subject identity, illumination and image quality remain open problems. In this work, we study the effect of variable head pose on machine learning regressors trained to estimate gaze direction. We propose a novel branched CNN architecture that improves the robustness of gaze classifiers to variable head pose, without increasing computational cost. We also present various procedures to effectively train our gaze network including transfer learning from the more closely related task of object viewpoint estimation and from a large high-fidelity synthetic gaze dataset, which enable our ten times faster gaze network to achieve competitive accuracy to its current state-of-the-art direct competitor.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Unconstrained remote gaze tracking using off-the-shelf cameras is a challenging problem. Recently, promising algorithms for appearance-based gaze estimation using convolutional neural networks (CNN) have been proposed. Improving their robustness to various confounding factors including variable head pose, subject identity, illumination and image quality remain open problems. In this work, we study the effect of variable head pose on machine learning regressors trained to estimate gaze direction. We propose a novel branched CNN architecture that improves the robustness of gaze classifiers to variable head pose, without increasing computational cost. We also present various procedures to effectively train our gaze network including transfer learning from the more closely related task of object viewpoint estimation and from a large high-fidelity synthetic gaze dataset, which enable our ten times faster gaze network to achieve competitive accuracy to its current state-of-the-art direct competitor.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Unconstrained remote gaze tracking using off-the-shelf cameras is a challenging problem. Recently, promising algorithms for appearance-based gaze estimation using convolutional neural networks (CNN) have been proposed. Improving their robustness to various confounding factors including variable head pose, subject identity, illumination and image quality remain open problems. In this work, we study the effect of variable head pose on machine learning regressors trained to estimate gaze direction. We propose a novel branched CNN architecture that improves the robustness of gaze classifiers to variable head pose, without increasing computational cost. We also present various procedures to effectively train our gaze network including transfer learning from the more closely related task of object viewpoint estimation and from a large high-fidelity synthetic gaze dataset, which enable our ten times faster gaze network to achieve competitive accuracy to its current state-of-the-art direct competitor.",
"fno": "610000c237",
"keywords": [
"Cameras",
"Gaze Tracking",
"Image Classification",
"Learning Artificial Intelligence",
"Neural Nets",
"Pose Estimation",
"Regression Analysis",
"Transfer Learning",
"Gaze Network",
"Machine Learning Regressors",
"Variable Head Pose",
"Branched CNN Architecture",
"Direct Competitor",
"Light Weight Head Pose Invariant Gaze Tracking",
"High Fidelity Synthetic Gaze Dataset",
"Object Viewpoint Estimation",
"Gaze Classifiers",
"Gaze Direction",
"Image Quality",
"Illumination",
"Variable Head",
"Convolutional Neural Networks",
"Appearance Based Gaze Estimation",
"Off The Shelf Cameras",
"Unconstrained Remote Gaze Tracking",
"Head",
"Magnetic Heads",
"Estimation",
"Gaze Tracking",
"Cameras",
"Training",
"Iris"
],
"authors": [
{
"affiliation": null,
"fullName": "Rajeev Ranjan",
"givenName": "Rajeev",
"surname": "Ranjan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shalini De Mello",
"givenName": "Shalini",
"surname": "De Mello",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jan Kautz",
"givenName": "Jan",
"surname": "Kautz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2237-22378",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6100-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "610000c229",
"articleId": "17D45Xh13wq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "610000c246",
"articleId": "17D45Wuc3bk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2012/1611/0/06239182",
"title": "Gaze estimation from multimodal Kinect data",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239182/12OmNA2cYDO",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2011/4419/0/4419a186",
"title": "The Importance of Eye Gaze and Head Pose to Estimating Levels of Attention",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2011/4419a186/12OmNqyDjtb",
"parentPublication": {
"id": "proceedings/vs-games/2011/4419/0",
"title": "Games and Virtual Worlds for Serious Applications, Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d162",
"title": "Monocular Free-Head 3D Gaze Tracking with Deep Learning and Geometry Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d162/12OmNxbmSBT",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d870",
"title": "Visual Gaze Estimation by Joint Head and Eye Information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d870/12OmNyRg4Cq",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b869",
"title": "Appearance-Based Gaze Tracking with Free Head Movement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b869/12OmNyo1nKa",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2016/03/mex2016030049",
"title": "Driver Gaze Region Estimation without Use of Eye Movement",
"doi": null,
"abstractUrl": "/magazine/ex/2016/03/mex2016030049/13rRUwInv0D",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2017/1235/0/08457962",
"title": "Gaze Tracking in 3D Space with a Convolution Neural Network “See What I See”",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2017/08457962/13xI8AAc3rr",
"parentPublication": {
"id": "proceedings/aipr/2017/1235/0",
"title": "2017 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/01/08122058",
"title": "MPIIGaze: Real-World Dataset and Deep Appearance-Based Gaze Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2019/01/08122058/17D45WZZ7E5",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10061572",
"title": "Free-HeadGAN: Neural Talking Head Synthesis with Explicit Gaze Control",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10061572/1Lk2C6ZD2zC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1PhUp98k",
"doi": "10.1109/ISMAR-Adjunct.2018.00038",
"title": "Do You Know What I Mean? An MR-Based Collaborative Platform",
"normalizedTitle": "Do You Know What I Mean? An MR-Based Collaborative Platform",
"abstract": "The Mixed Reality (MR) technology can be used to create unique collaborative experiences. In this paper, we propose a new remote collaboration platform using MR and eye-tracking that enables a remote helper to assist a local worker in an assembly task. We present results from research exploring the effect of sharing virtual gaze and annotations cues in an MR-based projector interface for remote collaboration. The key advantage compared to other remote collaborative MR interfaces is that it projects the remote expert's eye gaze into the real worksite to improve co-presence. The prototype system was evaluated with a pilot study comparing two conditions: POINTER and ET (eye-tracker cues). We observed that the task completion performance was better in the ET condition. And that sharing gaze significantly improved the awareness of each other's focus and co-presence.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Mixed Reality (MR) technology can be used to create unique collaborative experiences. In this paper, we propose a new remote collaboration platform using MR and eye-tracking that enables a remote helper to assist a local worker in an assembly task. We present results from research exploring the effect of sharing virtual gaze and annotations cues in an MR-based projector interface for remote collaboration. The key advantage compared to other remote collaborative MR interfaces is that it projects the remote expert's eye gaze into the real worksite to improve co-presence. The prototype system was evaluated with a pilot study comparing two conditions: POINTER and ET (eye-tracker cues). We observed that the task completion performance was better in the ET condition. And that sharing gaze significantly improved the awareness of each other's focus and co-presence.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Mixed Reality (MR) technology can be used to create unique collaborative experiences. In this paper, we propose a new remote collaboration platform using MR and eye-tracking that enables a remote helper to assist a local worker in an assembly task. We present results from research exploring the effect of sharing virtual gaze and annotations cues in an MR-based projector interface for remote collaboration. The key advantage compared to other remote collaborative MR interfaces is that it projects the remote expert's eye gaze into the real worksite to improve co-presence. The prototype system was evaluated with a pilot study comparing two conditions: POINTER and ET (eye-tracker cues). We observed that the task completion performance was better in the ET condition. And that sharing gaze significantly improved the awareness of each other's focus and co-presence.",
"fno": "08699227",
"keywords": [
"Augmented Reality",
"Gaze Tracking",
"Groupware",
"Object Tracking",
"User Interfaces",
"Video Signal Processing",
"Virtual Reality",
"Collaborative Platform",
"Unique Collaborative Experiences",
"Remote Collaboration Platform",
"Eye Tracking",
"Remote Helper",
"Local Worker",
"Assembly Task",
"Virtual Gaze",
"Annotations Cues",
"Projector Interface",
"Remote Collaborative MR Interfaces",
"Remote Expert",
"Eye Tracker Cues",
"Task Completion Performance",
"Mixed Reality Technology",
"ET",
"POINTER",
"MR",
"Augmented Reality",
"Remote Collaboration",
"Eye Gaze",
"Augmented Reality",
"Mixed Reality",
"H 5 1 User Experience Design Collaborative Interfaces X 2014 Collaboration Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Northwestern Polytechnical University, Cyber-Physical Interaction Lab.",
"fullName": "Peng Wang",
"givenName": "Peng",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, Cyber-Physical Interaction Lab.",
"fullName": "Shusheng Zhang",
"givenName": "Shusheng",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, Cyber-Physical Interaction Lab.",
"fullName": "Xiaoliang Bai",
"givenName": "Xiaoliang",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia, Empathio Computing Lab.",
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, Cyber-Physical Interaction Lab.",
"fullName": "Weiping He",
"givenName": "Weiping",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, Cyber-Physical Interaction Lab.",
"fullName": "Li Zhang",
"givenName": "Li",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, Cyber-Physical Interaction Lab.",
"fullName": "Jiaxiang Du",
"givenName": "Jiaxiang",
"surname": "Du",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, Cyber-Physical Interaction Lab.",
"fullName": "Shuxia Wang",
"givenName": "Shuxia",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "77-78",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699263",
"articleId": "19F1OYkEmWs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699248",
"articleId": "19F1R5RaLFS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2008/1971/0/04480753",
"title": "Symmetric Model of Remote Collaborative MR Using Tangible Replicas",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480753/12OmNyL0TDr",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a218",
"title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523400",
"title": "Do You See What I See? The Effect of Gaze Tracking on Task Space Remote Collaboration",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523400/13rRUy0HYRu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a153",
"title": "A User Study on MR Remote Collaboration Using Live 360 Video",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a153/17D45VsBU1V",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a250",
"title": "Using Speech to Visualise Shared Gaze Cues in MR Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a250/1CJcnpSVomk",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798024",
"title": "Head Pointer or Eye Gaze: Which Helps More in MR Remote Collaboration?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798024/1cJ0MmguvG8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798128",
"title": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798128/1cJ1aXJnUyI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a393",
"title": "Wearable RemoteFusion: A Mixed Reality Remote Collaboration System with Local Eye Gaze and Remote Hand Gesture Sharing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a393/1gysjIlsYus",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a473",
"title": "The Impact of Gaze Cues in Mixed Reality Collaborations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a473/1yeQCejb7Co",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a159",
"title": "Exploring the Effect of Visual Cues on Eye Gaze During AR-Guided Picking and Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a159/1yeQM18rD7G",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrQQ8dsLKM",
"doi": "10.1109/ISMAR55827.2022.00022",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"normalizedTitle": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"abstract": "High-accuracy, low-latency gaze tracking is becoming one of the indispensable features in augmented reality (AR) head-mounted devices (HMDs). Researchers have proposed different approaches to predict gaze positions from eye images. However, since only the eye modality is focused, these appearance-based algorithms are still struggle to trade off the accuracy and running speed in HMDs. In this paper, we propose a lightweight multi-modal network (HE-Tracker) to regress gaze positions. By fusing head-movement features with eye features, HE-Tracker achieves comparable accuracy (3.655° in all subjects) and Z_$27 \\times$_Z speedup (48 fps in the specialized AR HMD) compared to the state-of-the-art gaze tracking algorithm. We further demonstrate that when applying our head-eye coordination strategy to other baseline models, all these models achieve at least 6.36% performance improvement without a pronounced effect on running speed. Moreover, we construct HE-Gaze, the first multi-modal dataset with eye images and head-movement data for near-eye gaze tracking. This dataset is currently made of 757,360 frames and 15 persons, providing an opportunity to foster research in multi-modal gaze tracking approaches. Our dataset is available at DOWNLOAD LINK <sup>1</sup>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "High-accuracy, low-latency gaze tracking is becoming one of the indispensable features in augmented reality (AR) head-mounted devices (HMDs). Researchers have proposed different approaches to predict gaze positions from eye images. However, since only the eye modality is focused, these appearance-based algorithms are still struggle to trade off the accuracy and running speed in HMDs. In this paper, we propose a lightweight multi-modal network (HE-Tracker) to regress gaze positions. By fusing head-movement features with eye features, HE-Tracker achieves comparable accuracy (3.655° in all subjects) and $27 \\times$ speedup (48 fps in the specialized AR HMD) compared to the state-of-the-art gaze tracking algorithm. We further demonstrate that when applying our head-eye coordination strategy to other baseline models, all these models achieve at least 6.36% performance improvement without a pronounced effect on running speed. Moreover, we construct HE-Gaze, the first multi-modal dataset with eye images and head-movement data for near-eye gaze tracking. This dataset is currently made of 757,360 frames and 15 persons, providing an opportunity to foster research in multi-modal gaze tracking approaches. Our dataset is available at DOWNLOAD LINK <sup>1</sup>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "High-accuracy, low-latency gaze tracking is becoming one of the indispensable features in augmented reality (AR) head-mounted devices (HMDs). Researchers have proposed different approaches to predict gaze positions from eye images. However, since only the eye modality is focused, these appearance-based algorithms are still struggle to trade off the accuracy and running speed in HMDs. In this paper, we propose a lightweight multi-modal network (HE-Tracker) to regress gaze positions. By fusing head-movement features with eye features, HE-Tracker achieves comparable accuracy (3.655° in all subjects) and - speedup (48 fps in the specialized AR HMD) compared to the state-of-the-art gaze tracking algorithm. We further demonstrate that when applying our head-eye coordination strategy to other baseline models, all these models achieve at least 6.36% performance improvement without a pronounced effect on running speed. Moreover, we construct HE-Gaze, the first multi-modal dataset with eye images and head-movement data for near-eye gaze tracking. This dataset is currently made of 757,360 frames and 15 persons, providing an opportunity to foster research in multi-modal gaze tracking approaches. Our dataset is available at DOWNLOAD LINK 1.",
"fno": "532500a082",
"keywords": [
"Augmented Reality",
"Eye",
"Feature Extraction",
"Gaze Tracking",
"Helmet Mounted Displays",
"Human Computer Interaction",
"Object Tracking",
"Appearance Based Algorithms",
"Augmented Reality Head Mounted Devices",
"Eye Features",
"Eye Images",
"Eye Modality",
"Gaze Positions",
"Gaze Tracking Approaches",
"HE Gaze",
"HE Tracker",
"Head Eye Coordination Strategy",
"Head Mounted Displays",
"Head Movement Data",
"Head Movement Features",
"HMD",
"Indispensable Features",
"Multimodal Dataset",
"Multimodal Network",
"Near Eye Gaze Tracking",
"Performance Improvement",
"Running Speed",
"State Of The Art Gaze",
"Performance Evaluation",
"Head",
"Gaze Tracking",
"Resists",
"Predictive Models",
"Rendering Computer Graphics",
"Prediction Algorithms",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Augmented Reality",
"Computing Methodologies",
"Artificial Intelligence",
"Computer Vision",
"Computer Vision Problems"
],
"authors": [
{
"affiliation": "Hebei University of Technology,School of Artificial Intelligence and Data Science",
"fullName": "Lingling Chen",
"givenName": "Lingling",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hebei University of Technology,School of Artificial Intelligence and Data Science",
"fullName": "Yingxi Li",
"givenName": "Yingxi",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Defense Innovation Institute,Academy of Military Sciences,Beijing,China",
"fullName": "Xiaowei Bai",
"givenName": "Xiaowei",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin Artificial Intelligence Innovation Center,Tianjin,China",
"fullName": "Xiaodong Wang",
"givenName": "Xiaodong",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin Artificial Intelligence Innovation Center,Tianjin,China",
"fullName": "Yongqiang Hu",
"givenName": "Yongqiang",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin Artificial Intelligence Innovation Center,Tianjin,China",
"fullName": "Mingwu Song",
"givenName": "Mingwu",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Defense Innovation Institute,Academy of Military Sciences,Beijing,China",
"fullName": "Liang Xie",
"givenName": "Liang",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Defense Innovation Institute,Academy of Military Sciences,Beijing,China",
"fullName": "Ye Yan",
"givenName": "Ye",
"surname": "Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Defense Innovation Institute,Academy of Military Sciences,Beijing,China",
"fullName": "Erwei Yin",
"givenName": "Erwei",
"surname": "Yin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "82-91",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "532500a074",
"articleId": "1JrRaeV82L6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a092",
"articleId": "1JrQS43SrFC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223443",
"title": "Non-obscuring binocular eye tracking for wide field-of-view head-mounted-displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223443/12OmNqzu6MP",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926684",
"title": "A Statistical Approach to Continuous Self-Calibrating Eye Gaze Tracking for Head-Mounted Virtual Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926684/12OmNvlxJrb",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d162",
"title": "Monocular Free-Head 3D Gaze Tracking with Deep Learning and Geometry Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d162/12OmNxbmSBT",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d870",
"title": "Visual Gaze Estimation by Joint Head and Eye Information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d870/12OmNyRg4Cq",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b869",
"title": "Appearance-Based Gaze Tracking with Free Head Movement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b869/12OmNyo1nKa",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c237",
"title": "Light-Weight Head Pose Invariant Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c237/17D45WXIkI8",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a375",
"title": "Neural 3D Gaze: 3D Pupil Localization and Gaze Tracking based on Anatomical Eye Model and Neural Refraction Correction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a375/1JrQRCijhMk",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797852",
"title": "Perception of Volumetric Characters' Eye-Gaze Direction in Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797852/1cJ0UskDCRa",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09389490",
"title": "Event-Based Near-Eye Gaze Tracking Beyond 10,000 Hz",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09389490/1smZT5W55V6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a011",
"title": "Edge-Guided Near-Eye Image Analysis for Head Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a011/1yeCW4N7Y9a",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnY5akLwvS",
"doi": "10.1109/VRW52623.2021.00159",
"title": "Estimating Gaze From Head and Hand Pose and Scene Images for Open-Ended Exploration in VR Environments",
"normalizedTitle": "Estimating Gaze From Head and Hand Pose and Scene Images for Open-Ended Exploration in VR Environments",
"abstract": "The widespread utility of eye tracking technology has created a growing demand for more consistent and reliable eye-tracking systems, and there is a need for new and accessible approaches that can enhance the accuracy of eye-tracking data. Previous studies have offered evidence for associations between certain non-eye signals and gaze such as a strong coordination between head motion and gaze shifts. e.g. [3] , hand and eye spatiotemporal statistics, e.g. [7] , and gaze behavior and scene content, e.g. [2] . Previous studies have also shown how various combinations of eye, head, scene, and hand signals can be leveraged for applications such as gaze estimation [5] , [10] , prediction [8] , and classification [6] . Though these previous approaches provide support for the idea that non-eye sensors (i.e. head, hand, and scene) are useful for estimating gaze, they have not yet fully addressed how these signals individually and in combination contribute to gaze estimation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The widespread utility of eye tracking technology has created a growing demand for more consistent and reliable eye-tracking systems, and there is a need for new and accessible approaches that can enhance the accuracy of eye-tracking data. Previous studies have offered evidence for associations between certain non-eye signals and gaze such as a strong coordination between head motion and gaze shifts. e.g. [3] , hand and eye spatiotemporal statistics, e.g. [7] , and gaze behavior and scene content, e.g. [2] . Previous studies have also shown how various combinations of eye, head, scene, and hand signals can be leveraged for applications such as gaze estimation [5] , [10] , prediction [8] , and classification [6] . Though these previous approaches provide support for the idea that non-eye sensors (i.e. head, hand, and scene) are useful for estimating gaze, they have not yet fully addressed how these signals individually and in combination contribute to gaze estimation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The widespread utility of eye tracking technology has created a growing demand for more consistent and reliable eye-tracking systems, and there is a need for new and accessible approaches that can enhance the accuracy of eye-tracking data. Previous studies have offered evidence for associations between certain non-eye signals and gaze such as a strong coordination between head motion and gaze shifts. e.g. [3] , hand and eye spatiotemporal statistics, e.g. [7] , and gaze behavior and scene content, e.g. [2] . Previous studies have also shown how various combinations of eye, head, scene, and hand signals can be leveraged for applications such as gaze estimation [5] , [10] , prediction [8] , and classification [6] . Though these previous approaches provide support for the idea that non-eye sensors (i.e. head, hand, and scene) are useful for estimating gaze, they have not yet fully addressed how these signals individually and in combination contribute to gaze estimation.",
"fno": "405700a554",
"keywords": [
"Eye",
"Gaze Tracking",
"Human Computer Interaction",
"Virtual Reality",
"Noneye Sensors",
"Gaze Estimation",
"Hand Pose",
"Scene Images",
"Open Ended Exploration",
"VR Environments",
"Widespread Utility",
"Eye Tracking Technology",
"Consistent Eye Tracking Systems",
"Reliable Eye Tracking Systems",
"Accessible Approaches",
"Eye Tracking Data",
"Noneye Signals",
"Head Motion",
"Gaze Behavior",
"Scene Content",
"Hand Signals",
"Head",
"Three Dimensional Displays",
"Conferences",
"Estimation",
"Virtual Reality",
"Gaze Tracking",
"User Interfaces",
"Eye Tracking",
"Gaze Estimation",
"Virtual Reality",
"Non Eye Sensors"
],
"authors": [
{
"affiliation": "University of Nevada, Reno,Facebook Reality Labs",
"fullName": "Kara J. Emery",
"givenName": "Kara J.",
"surname": "Emery",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Facebook Reality Labs",
"fullName": "Marina Zannoli",
"givenName": "Marina",
"surname": "Zannoli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Facebook Reality Labs",
"fullName": "Lei Xiao",
"givenName": "Lei",
"surname": "Xiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Facebook Reality Labs",
"fullName": "James Warren",
"givenName": "James",
"surname": "Warren",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Facebook Reality Labs",
"fullName": "Sachin S. Talathi",
"givenName": "Sachin S.",
"surname": "Talathi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "554-555",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tnY4ZCya8U",
"name": "pvrw202140570-09419188s1-mm_405700a554.zip",
"size": "24.8 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419188s1-mm_405700a554.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "405700a552",
"articleId": "1tnWybIubwA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a556",
"articleId": "1tnXbEAaBdm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2018/2335/0/233501a789",
"title": "Human Computer Interaction with Head Pose, Eye Gaze and Body Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a789/12OmNASILS4",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2011/4419/0/4419a186",
"title": "The Importance of Eye Gaze and Head Pose to Estimating Levels of Attention",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2011/4419a186/12OmNqyDjtb",
"parentPublication": {
"id": "proceedings/vs-games/2011/4419/0",
"title": "Games and Virtual Worlds for Serious Applications, Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d162",
"title": "Monocular Free-Head 3D Gaze Tracking with Deep Learning and Geometry Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d162/12OmNxbmSBT",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d870",
"title": "Visual Gaze Estimation by Joint Head and Eye Information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d870/12OmNyRg4Cq",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b869",
"title": "Appearance-Based Gaze Tracking with Free Head Movement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b869/12OmNyo1nKa",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523400",
"title": "Do You See What I See? The Effect of Gaze Tracking on Task Space Remote Collaboration",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523400/13rRUy0HYRu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c237",
"title": "Light-Weight Head Pose Invariant Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c237/17D45WXIkI8",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a787",
"title": "VRDoc: Gaze-based Interactions for VR Reading Experience",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a787/1JrRgFp6G2s",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a422",
"title": "Behavior Analysis of Indoor Escape Route-Finding Based on Head-Mounted VR and Eye Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a422/1ehBGoaPHhK",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxwENvc",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"acronym": "icassp",
"groupId": "1000002",
"volume": "3",
"displayVolume": "3",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAY79iu",
"doi": "10.1109/ICASSP.2004.1326642",
"title": "Spatial selectivity modulated just-noticeable-distortion profile for video",
"normalizedTitle": "Spatial selectivity modulated just-noticeable-distortion profile for video",
"abstract": "Both visual sensitivity and spatial selectivity determine the overall visibility threshold at each pixel in an image, according to the physiological and psychological evidence towards the human visual system (HVS). Visual sensitivity can be decided by an existing estimator for just-noticeable-distortion (JND). A computational model is proposed for incorporating a selectivity measure into the JND profile so that more effective noise shaping is possible in various applications. Experimental results with noise-embedded video sequences confirm that the introduction of spatial selectivity enhances the performance of the JND profile used in noise shaping.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Both visual sensitivity and spatial selectivity determine the overall visibility threshold at each pixel in an image, according to the physiological and psychological evidence towards the human visual system (HVS). Visual sensitivity can be decided by an existing estimator for just-noticeable-distortion (JND). A computational model is proposed for incorporating a selectivity measure into the JND profile so that more effective noise shaping is possible in various applications. Experimental results with noise-embedded video sequences confirm that the introduction of spatial selectivity enhances the performance of the JND profile used in noise shaping.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Both visual sensitivity and spatial selectivity determine the overall visibility threshold at each pixel in an image, according to the physiological and psychological evidence towards the human visual system (HVS). Visual sensitivity can be decided by an existing estimator for just-noticeable-distortion (JND). A computational model is proposed for incorporating a selectivity measure into the JND profile so that more effective noise shaping is possible in various applications. Experimental results with noise-embedded video sequences confirm that the introduction of spatial selectivity enhances the performance of the JND profile used in noise shaping.",
"fno": "01326642",
"keywords": [
"Random Noise",
"Video Signal Processing",
"Sensitivity",
"Parameter Estimation",
"Distortion",
"Image Sequences",
"Visual Perception",
"Spatial Selectivity",
"Just Noticeable Distortion Profile",
"Visual Sensitivity",
"Human Visual System",
"HVS",
"Noise Embedded Video Sequences",
"Noise Shaping",
"Psychology",
"Humans",
"Eyes",
"Optical Noise",
"Photoreceptors",
"Spatial Resolution",
"Machine Vision",
"Computational Modeling",
"Biomedical Optical Imaging"
],
"authors": [
{
"affiliation": "Inst. for Infocomm Res., Agency for Sci., Technol. & Res., Singapore, Singapore",
"fullName": "Zhongkang Lu",
"givenName": null,
"surname": "Zhongkang Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. for Infocomm Res., Agency for Sci., Technol. & Res., Singapore, Singapore",
"fullName": "Weisi Lin",
"givenName": null,
"surname": "Weisi Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. for Infocomm Res., Agency for Sci., Technol. & Res., Singapore, Singapore",
"fullName": "X.K. Yang",
"givenName": "X.K.",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. for Infocomm Res., Agency for Sci., Technol. & Res., Singapore, Singapore",
"fullName": "E.P. Ong",
"givenName": "E.P.",
"surname": "Ong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. for Infocomm Res., Agency for Sci., Technol. & Res., Singapore, Singapore",
"fullName": "S.S. Yao",
"givenName": "S.S.",
"surname": "Yao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icassp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-01-01T00:00:00",
"pubType": "proceedings",
"pages": "iii-705-8 vol.3",
"year": "2004",
"issn": "1520-6149",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01326641",
"articleId": "12OmNwEJ0Rd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01326643",
"articleId": "12OmNynsbCe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2010/7491/0/05583897",
"title": "Temporal color Just Noticeable Distortion model and its application for video coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583897/12OmNC3Xhyw",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2008/3493/0/3493a260",
"title": "Just Noticeable Distortion Model and Its Application in Color Image Watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2008/3493a260/12OmNvTjZUm",
"parentPublication": {
"id": "proceedings/sitis/2008/3493/0",
"title": "2008 IEEE International Conference on Signal Image Technology and Internet Based Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479908",
"title": "Passive and Active Kinesthetic Perception Just-noticeable-difference for Natural Frequency of Virtual Dynamic Systems",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479908/12OmNxA3Z3K",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583164",
"title": "Combined just noticeable difference model guided image watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583164/12OmNzWfp5c",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607587",
"title": "Spatial just noticeable distortion profile for image in DCT domain",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607587/12OmNzxgHuG",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07352354",
"title": "Just Noticeable Distortion Profile for Flat-Shaded 3D Mesh Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07352354/13rRUwhHcJn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551562",
"title": "Depth Masking Based Binocular Just-Noticeable-Distortion Model",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551562/17D45WaTknQ",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cniot/2022/6910/0/691000a146",
"title": "A Novel DCT-based Just Noticeable Difference Model for Videos Based on Structure Complexity",
"doi": null,
"abstractUrl": "/proceedings-article/cniot/2022/691000a146/1EOEenLEmm4",
"parentPublication": {
"id": "proceedings/cniot/2022/6910/0",
"title": "2022 3rd International Conference on Computing, Networks and Internet of Things (CNIOT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2020/1485/0/09105999",
"title": "Unsupervised Deep Learning for Just Noticeable Difference Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2020/09105999/1kwqFoSN4Aw",
"parentPublication": {
"id": "proceedings/icmew/2020/1485/0",
"title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552881",
"title": "Modeling Just Noticeable Differences in Charts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwCJOWF",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC3Xhyw",
"doi": "10.1109/ICME.2010.5583897",
"title": "Temporal color Just Noticeable Distortion model and its application for video coding",
"normalizedTitle": "Temporal color Just Noticeable Distortion model and its application for video coding",
"abstract": "Just Noticeable Distortion (JND), which is utilized to reduce the bit rate without introducing noticeable visual distortion, plays an important role in perceptual image and video processing. For the temporal color JND, it takes into account not only the spatial and luminance HVS properties, but also the temporal and chroma HVS properties. In this paper, we first develop a spatio-temporal model estimating JND for color video by completely incorporating the color CSF, the frequency property of DCT coefficient, the contrast masking effect and the motion property. Then we incorporate the JND model into video encoding system via the residue filtering process. This method can work with any prevalent video coding standards. To demonstrate the effectiveness of the JND model, we has implemented it into the H.264/AVC reference software JM12.4 and the experimental results show that the bit rate can be reduced by average 18.20%, which reflects that our JND model is able to exploit the HVS bounds more aggressively without introducing noticeable visual distortions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Just Noticeable Distortion (JND), which is utilized to reduce the bit rate without introducing noticeable visual distortion, plays an important role in perceptual image and video processing. For the temporal color JND, it takes into account not only the spatial and luminance HVS properties, but also the temporal and chroma HVS properties. In this paper, we first develop a spatio-temporal model estimating JND for color video by completely incorporating the color CSF, the frequency property of DCT coefficient, the contrast masking effect and the motion property. Then we incorporate the JND model into video encoding system via the residue filtering process. This method can work with any prevalent video coding standards. To demonstrate the effectiveness of the JND model, we has implemented it into the H.264/AVC reference software JM12.4 and the experimental results show that the bit rate can be reduced by average 18.20%, which reflects that our JND model is able to exploit the HVS bounds more aggressively without introducing noticeable visual distortions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Just Noticeable Distortion (JND), which is utilized to reduce the bit rate without introducing noticeable visual distortion, plays an important role in perceptual image and video processing. For the temporal color JND, it takes into account not only the spatial and luminance HVS properties, but also the temporal and chroma HVS properties. In this paper, we first develop a spatio-temporal model estimating JND for color video by completely incorporating the color CSF, the frequency property of DCT coefficient, the contrast masking effect and the motion property. Then we incorporate the JND model into video encoding system via the residue filtering process. This method can work with any prevalent video coding standards. To demonstrate the effectiveness of the JND model, we has implemented it into the H.264/AVC reference software JM12.4 and the experimental results show that the bit rate can be reduced by average 18.20%, which reflects that our JND model is able to exploit the HVS bounds more aggressively without introducing noticeable visual distortions.",
"fno": "05583897",
"keywords": [],
"authors": [
{
"affiliation": "National Engineering Research Center for Multimedia software, Wuhan University, Wuhan, China",
"fullName": "Hao Chen",
"givenName": "Hao",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Engineering Research Center for Multimedia software, Wuhan University, Wuhan, China",
"fullName": "Ruimin Hu",
"givenName": "Ruimin",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Engineering Research Center for Multimedia software, Wuhan University, Wuhan, China",
"fullName": "Jinhui Hu",
"givenName": "Jinhui",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Engineering Research Center for Multimedia software, Wuhan University, Wuhan, China",
"fullName": "Zhongyuan Wang",
"givenName": "Zhongyuan",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-07-01T00:00:00",
"pubType": "proceedings",
"pages": "713-718",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-7491-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05583896",
"articleId": "12OmNBCqbzR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05583899",
"articleId": "12OmNwKoZeH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2004/8484/3/01326642",
"title": "Spatial selectivity modulated just-noticeable-distortion profile for video",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326642/12OmNAY79iu",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/3",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2008/3493/0/3493a260",
"title": "Just Noticeable Distortion Model and Its Application in Color Image Watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2008/3493a260/12OmNvTjZUm",
"parentPublication": {
"id": "proceedings/sitis/2008/3493/0",
"title": "2008 IEEE International Conference on Signal Image Technology and Internet Based Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2015/8660/0/8660a142",
"title": "3D Video Coding Using Just Noticeable Depth Difference Based on H.265/HEVC",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2015/8660a142/12OmNwbukia",
"parentPublication": {
"id": "proceedings/cis/2015/8660/0",
"title": "2015 11th International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583164",
"title": "Combined just noticeable difference model guided image watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583164/12OmNzWfp5c",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786253",
"title": "Just Noticeable Difference Based Fast Coding Unit Partition in 3D-HEVC Intra Coding",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786253/12OmNzZEAp6",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07352354",
"title": "Just Noticeable Distortion Profile for Flat-Shaded 3D Mesh Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07352354/13rRUwhHcJn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551562",
"title": "Depth Masking Based Binocular Just-Noticeable-Distortion Model",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551562/17D45WaTknQ",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cniot/2022/6910/0/691000a146",
"title": "A Novel DCT-based Just Noticeable Difference Model for Videos Based on Structure Complexity",
"doi": null,
"abstractUrl": "/proceedings-article/cniot/2022/691000a146/1EOEenLEmm4",
"parentPublication": {
"id": "proceedings/cniot/2022/6910/0",
"title": "2022 3rd International Conference on Computing, Networks and Internet of Things (CNIOT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2020/1485/0/09105999",
"title": "Unsupervised Deep Learning for Just Noticeable Difference Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2020/09105999/1kwqFoSN4Aw",
"parentPublication": {
"id": "proceedings/icmew/2020/1485/0",
"title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552881",
"title": "Modeling Just Noticeable Differences in Charts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwekjuI",
"title": "2008 IEEE International Conference on Signal Image Technology and Internet Based Systems",
"acronym": "sitis",
"groupId": "1002425",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvTjZUm",
"doi": "10.1109/SITIS.2008.67",
"title": "Just Noticeable Distortion Model and Its Application in Color Image Watermarking",
"normalizedTitle": "Just Noticeable Distortion Model and Its Application in Color Image Watermarking",
"abstract": "In this paper, a perceptually adaptive watermarking scheme for color images is proposed in order to achieve robustness and transparency. A new just no-ticeable distortion (JND) estimator for color images is first designed in the wavelet domain. The key issue of the JND model is to effectively integrate visual mask-ing effects. The estimator is an extension to the per-ceptual model that is used in image coding for gray-scale images. Except for the visual masking effects given coefficient by coefficient by taking into account the luminance content and the texture of grayscale images, the crossed masking effect given by the inter-action between luminance and chrominance compo-nents and the effect given by the variance within the local region of the target coefficient are investigated such that the visibility threshold for the human visual system (HVS) can be evaluated. In a locally adaptive fashion based on the wavelet decomposition, the esti-mator applies to all subbands of luminance and chrominance components of color images and is used to measure the visibility of wavelet quantization errors. The subband JND profiles are incorporated into a color image watermarking scheme. Performance in terms of robustness and transparency of the water-marking scheme is obtained by means of the proposed approach to embed the maximum strength watermark while maintaining the perceptually lossless quality of the watermarked color image. Simulation results show that the proposed scheme with inserting watermarks into luminance and chrominance components is more robust than the existing scheme while retaining the watermark transparency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, a perceptually adaptive watermarking scheme for color images is proposed in order to achieve robustness and transparency. A new just no-ticeable distortion (JND) estimator for color images is first designed in the wavelet domain. The key issue of the JND model is to effectively integrate visual mask-ing effects. The estimator is an extension to the per-ceptual model that is used in image coding for gray-scale images. Except for the visual masking effects given coefficient by coefficient by taking into account the luminance content and the texture of grayscale images, the crossed masking effect given by the inter-action between luminance and chrominance compo-nents and the effect given by the variance within the local region of the target coefficient are investigated such that the visibility threshold for the human visual system (HVS) can be evaluated. In a locally adaptive fashion based on the wavelet decomposition, the esti-mator applies to all subbands of luminance and chrominance components of color images and is used to measure the visibility of wavelet quantization errors. The subband JND profiles are incorporated into a color image watermarking scheme. Performance in terms of robustness and transparency of the water-marking scheme is obtained by means of the proposed approach to embed the maximum strength watermark while maintaining the perceptually lossless quality of the watermarked color image. Simulation results show that the proposed scheme with inserting watermarks into luminance and chrominance components is more robust than the existing scheme while retaining the watermark transparency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, a perceptually adaptive watermarking scheme for color images is proposed in order to achieve robustness and transparency. A new just no-ticeable distortion (JND) estimator for color images is first designed in the wavelet domain. The key issue of the JND model is to effectively integrate visual mask-ing effects. The estimator is an extension to the per-ceptual model that is used in image coding for gray-scale images. Except for the visual masking effects given coefficient by coefficient by taking into account the luminance content and the texture of grayscale images, the crossed masking effect given by the inter-action between luminance and chrominance compo-nents and the effect given by the variance within the local region of the target coefficient are investigated such that the visibility threshold for the human visual system (HVS) can be evaluated. In a locally adaptive fashion based on the wavelet decomposition, the esti-mator applies to all subbands of luminance and chrominance components of color images and is used to measure the visibility of wavelet quantization errors. The subband JND profiles are incorporated into a color image watermarking scheme. Performance in terms of robustness and transparency of the water-marking scheme is obtained by means of the proposed approach to embed the maximum strength watermark while maintaining the perceptually lossless quality of the watermarked color image. Simulation results show that the proposed scheme with inserting watermarks into luminance and chrominance components is more robust than the existing scheme while retaining the watermark transparency.",
"fno": "3493a260",
"keywords": [],
"authors": [
{
"affiliation": null,
"fullName": "Kuo-Cheng Liu",
"givenName": "Kuo-Cheng",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sitis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-11-01T00:00:00",
"pubType": "proceedings",
"pages": "260-267",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3493-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3493a254",
"articleId": "12OmNzwZ6m8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3493a277",
"articleId": "12OmNxGSm1Z",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2004/8484/3/01326642",
"title": "Spatial selectivity modulated just-noticeable-distortion profile for video",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326642/12OmNAY79iu",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/3",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583897",
"title": "Temporal color Just Noticeable Distortion model and its application for video coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583897/12OmNC3Xhyw",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2008/1913/0/04543994",
"title": "Color Enhancement in Image Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2008/04543994/12OmNxwENNb",
"parentPublication": {
"id": "proceedings/wacv/2008/1913/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/3/81833034",
"title": "Color Image Compression Using an Embedded Rate Scalable Approach",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81833034/12OmNyz5JVG",
"parentPublication": {
"id": "proceedings/icip/1997/8183/3",
"title": "Proceedings of International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583164",
"title": "Combined just noticeable difference model guided image watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583164/12OmNzWfp5c",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607587",
"title": "Spatial just noticeable distortion profile for image in DCT domain",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607587/12OmNzxgHuG",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ias/2009/3744/2/3744b623",
"title": "Human Visual System Based Watermarking for Color Images",
"doi": null,
"abstractUrl": "/proceedings-article/ias/2009/3744b623/12OmNzxgHyG",
"parentPublication": {
"id": "proceedings/ias/2009/3744/2",
"title": "Information Assurance and Security, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07352354",
"title": "Just Noticeable Distortion Profile for Flat-Shaded 3D Mesh Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07352354/13rRUwhHcJn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551562",
"title": "Depth Masking Based Binocular Just-Noticeable-Distortion Model",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551562/17D45WaTknQ",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cniot/2022/6910/0/691000a146",
"title": "A Novel DCT-based Just Noticeable Difference Model for Videos Based on Structure Complexity",
"doi": null,
"abstractUrl": "/proceedings-article/cniot/2022/691000a146/1EOEenLEmm4",
"parentPublication": {
"id": "proceedings/cniot/2022/6910/0",
"title": "2022 3rd International Conference on Computing, Networks and Internet of Things (CNIOT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyO8tMM",
"title": "2016 Data Compression Conference (DCC)",
"acronym": "dcc",
"groupId": "1000177",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzZEAp6",
"doi": "10.1109/DCC.2016.46",
"title": "Just Noticeable Difference Based Fast Coding Unit Partition in 3D-HEVC Intra Coding",
"normalizedTitle": "Just Noticeable Difference Based Fast Coding Unit Partition in 3D-HEVC Intra Coding",
"abstract": "Summary form only given. This paper mainly studies currently developing 3D video coding based on HEVC. HEVC-based 3D video coding mainly focuses on 3DTV and auto-stereoscopic video compression system. A variety of new encoding tools, such as inter-view motion prediction and depth modeling modes, have been added in 3D-HEVC. Although 3D-HEVC provides greater bit rate saving, it also brings the enormous encoding complexity increase. The coding time is increased correspondingly. It is necessary to reduce the encoding time. In this paper, a fast CU-sized partition algorithm is proposed for 3D-HEVC intra coding. The key point of this algorithm is to find the relationship between the texture characteristic and the sub-partition in each CU. It needs to determine whether the LCU can be subdivided to smaller CU according to the relationship. In order to reduce the redundancy of the human eye, just noticeable difference (JND) is a high efficiency model in the base of psychology and physiology. Instead of the time-consuming rate distortion optimization for coding mode decision, the variance of JND in each CU can be exploited to partition the coding unit according to human visual system characteristics. In other words, the larger blocks with higher JND variance will be subdivided to smaller blocks with lower JND variance. Consequently, the rules of CU preliminary partition are decided as follows: (a) For a 64×64 CU, if the variance of JND is larger than 0.25, the CU will be sub-divided into four 32×32 sub-blocks. (b) For a 32×32 CU, if the variance of JND is larger than 0.15, the CU will be sub-divided into four 16×16 sub-blocks. (c) For a 16×16 CU, if the variance of JND is larger than 0.10, the CU will be sub-divided into four 8×8 sub-blocks. The proposed algorithm is implemented based on HTM-13.1 reference software. The experiment condition is set up as \"All Intra-Main\" (AI-Main) configuration [1]. \nThe quantization parameter (QP) values of texture are set to 25, 30, 35 and 40, respectively and the corresponding QPs of depth can be set to 34,39,42,45. The experimental results show that the fast intra mode decision algorithm provides over 29.25% encoding time saving on average with comparable rate distortion performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Summary form only given. This paper mainly studies currently developing 3D video coding based on HEVC. HEVC-based 3D video coding mainly focuses on 3DTV and auto-stereoscopic video compression system. A variety of new encoding tools, such as inter-view motion prediction and depth modeling modes, have been added in 3D-HEVC. Although 3D-HEVC provides greater bit rate saving, it also brings the enormous encoding complexity increase. The coding time is increased correspondingly. It is necessary to reduce the encoding time. In this paper, a fast CU-sized partition algorithm is proposed for 3D-HEVC intra coding. The key point of this algorithm is to find the relationship between the texture characteristic and the sub-partition in each CU. It needs to determine whether the LCU can be subdivided to smaller CU according to the relationship. In order to reduce the redundancy of the human eye, just noticeable difference (JND) is a high efficiency model in the base of psychology and physiology. Instead of the time-consuming rate distortion optimization for coding mode decision, the variance of JND in each CU can be exploited to partition the coding unit according to human visual system characteristics. In other words, the larger blocks with higher JND variance will be subdivided to smaller blocks with lower JND variance. Consequently, the rules of CU preliminary partition are decided as follows: (a) For a 64×64 CU, if the variance of JND is larger than 0.25, the CU will be sub-divided into four 32×32 sub-blocks. (b) For a 32×32 CU, if the variance of JND is larger than 0.15, the CU will be sub-divided into four 16×16 sub-blocks. (c) For a 16×16 CU, if the variance of JND is larger than 0.10, the CU will be sub-divided into four 8×8 sub-blocks. The proposed algorithm is implemented based on HTM-13.1 reference software. The experiment condition is set up as \"All Intra-Main\" (AI-Main) configuration [1]. \nThe quantization parameter (QP) values of texture are set to 25, 30, 35 and 40, respectively and the corresponding QPs of depth can be set to 34,39,42,45. The experimental results show that the fast intra mode decision algorithm provides over 29.25% encoding time saving on average with comparable rate distortion performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Summary form only given. This paper mainly studies currently developing 3D video coding based on HEVC. HEVC-based 3D video coding mainly focuses on 3DTV and auto-stereoscopic video compression system. A variety of new encoding tools, such as inter-view motion prediction and depth modeling modes, have been added in 3D-HEVC. Although 3D-HEVC provides greater bit rate saving, it also brings the enormous encoding complexity increase. The coding time is increased correspondingly. It is necessary to reduce the encoding time. In this paper, a fast CU-sized partition algorithm is proposed for 3D-HEVC intra coding. The key point of this algorithm is to find the relationship between the texture characteristic and the sub-partition in each CU. It needs to determine whether the LCU can be subdivided to smaller CU according to the relationship. In order to reduce the redundancy of the human eye, just noticeable difference (JND) is a high efficiency model in the base of psychology and physiology. Instead of the time-consuming rate distortion optimization for coding mode decision, the variance of JND in each CU can be exploited to partition the coding unit according to human visual system characteristics. In other words, the larger blocks with higher JND variance will be subdivided to smaller blocks with lower JND variance. Consequently, the rules of CU preliminary partition are decided as follows: (a) For a 64×64 CU, if the variance of JND is larger than 0.25, the CU will be sub-divided into four 32×32 sub-blocks. (b) For a 32×32 CU, if the variance of JND is larger than 0.15, the CU will be sub-divided into four 16×16 sub-blocks. (c) For a 16×16 CU, if the variance of JND is larger than 0.10, the CU will be sub-divided into four 8×8 sub-blocks. The proposed algorithm is implemented based on HTM-13.1 reference software. The experiment condition is set up as \"All Intra-Main\" (AI-Main) configuration [1]. \nThe quantization parameter (QP) values of texture are set to 25, 30, 35 and 40, respectively and the corresponding QPs of depth can be set to 34,39,42,45. The experimental results show that the fast intra mode decision algorithm provides over 29.25% encoding time saving on average with comparable rate distortion performance.",
"fno": "07786253",
"keywords": [
"Image Texture",
"Psychology",
"Stereo Image Processing",
"Video Coding",
"3 D HEVC Intra Coding",
"HEVC Based 3 D Video Coding",
"3 DTV",
"Autostereoscopic Video Compression System",
"Encoding Tools",
"Texture Characteristic",
"LCU",
"Human Eye Redundancy",
"Psychology",
"Physiology",
"JND Variance",
"CU Sized Partition Algorithm",
"Human Visual System Characteristics",
"HTM 13 1 Reference Software",
"All Intramain Configuration",
"AI Main Configuration",
"Quantization Parameter",
"Rate Distortion Performance",
"Fast Intra Mode Decision Algorithm",
"Just Noticeable Difference Based Fast Coding Unit Partition",
"Encoding",
"Three Dimensional Displays",
"Video Coding",
"Partitioning Algorithms",
"Rate Distortion",
"Software Algorithms",
"Software"
],
"authors": [
{
"affiliation": null,
"fullName": "Hai Ren",
"givenName": "Hai",
"surname": "Ren",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Huihui Bai",
"givenName": "Huihui",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chunyu Lin",
"givenName": "Chunyu",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mengmeng Zhang",
"givenName": "Mengmeng",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yao Zhao",
"givenName": "Yao",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "629-629",
"year": "2016",
"issn": "1068-0314",
"isbn": "978-1-5090-1853-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07786252",
"articleId": "12OmNwdtwj2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07786254",
"articleId": "12OmNy6ZrYp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cit/2017/0958/0/0958a093",
"title": "Fast Coding-Unit Mode Decision for HEVC Transrating",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2017/0958a093/12OmNBscCUq",
"parentPublication": {
"id": "proceedings/cit/2017/0958/0",
"title": "2017 IEEE International Conference on Computer and Information Technology (CIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2017/6721/0/07921915",
"title": "Fast Intra Coding Implementation for High Efficiency Video Coding (HEVC)",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2017/07921915/12OmNxA3YYq",
"parentPublication": {
"id": "proceedings/dcc/2017/6721/0",
"title": "2017 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2017/6721/0/07923725",
"title": "SATD Based Fast Intra Prediction for HEVC",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2017/07923725/12OmNxb5hvz",
"parentPublication": {
"id": "proceedings/dcc/2017/6721/0",
"title": "2017 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fcst/2015/9295/0/9295a316",
"title": "A Fast Coding Unit Depth Decision Algorithm for HEVC Inter Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/fcst/2015/9295a316/12OmNyugz2E",
"parentPublication": {
"id": "proceedings/fcst/2015/9295/0",
"title": "2015 Ninth International Conference on Frontier of Computer Science and Technology (FCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890324",
"title": "Fast bi-partition mode selection for 3D HEVC depth intra coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890324/12OmNzcPAvW",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890647",
"title": "Fast coding unit depth decision for HEVC",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890647/12OmNzlUKQA",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsc/2021/1815/0/181500a130",
"title": "ResNet Approach for Coding Unit Fast Splitting Decision of HEVC Intra Coding",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2021/181500a130/1CuhQsa9JF6",
"parentPublication": {
"id": "proceedings/dsc/2021/1815/0",
"title": "2021 IEEE Sixth International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ises/2022/9922/0/992200a548",
"title": "CTU Partition for Intra-Mode HEVC using Convolutional Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/ises/2022/992200a548/1KrgqiRrXuo",
"parentPublication": {
"id": "proceedings/ises/2022/9922/0",
"title": "2022 IEEE International Symposium on Smart Electronic Systems (iSES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102958",
"title": "Enhanced Cu Partitioning Search Method for Intra Coding in HEVC",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102958/1kwqUmArJvy",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2020/8577/0/09316455",
"title": "FCM-Based Fast Texture CU Size Decision Algorithm for 3D-HEVC Inter-Coding",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2020/09316455/1qmfDGLmE0w",
"parentPublication": {
"id": "proceedings/aiccsa/2020/8577/0",
"title": "2020 IEEE/ACS 17th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyKa5Tk",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzxgHuG",
"doi": "10.1109/ICME.2008.4607587",
"title": "Spatial just noticeable distortion profile for image in DCT domain",
"normalizedTitle": "Spatial just noticeable distortion profile for image in DCT domain",
"abstract": "In this paper, a DCT based JND model for monochrome pictures is proposed. This model incorporates the spatial contrast sensitivity function (CSF), the luminance adaptation effect and the contrast masking effect based on block classification. Gamma correction is also considered to compensate the original luminance adaptation effect which gives more accurate results. Moreover, a psychophysical experiment was designed to parameterize our model. Experimental results show that the proposed model is consistent with the human visual system. Compared with the other JND profiles, the proposed model can tolerate more distortion and has much better perceptual quality. The proposed JND model can be easily applied in many related areas, such as compression, watermarking, error protection, perceptual distortion metric, and so on.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, a DCT based JND model for monochrome pictures is proposed. This model incorporates the spatial contrast sensitivity function (CSF), the luminance adaptation effect and the contrast masking effect based on block classification. Gamma correction is also considered to compensate the original luminance adaptation effect which gives more accurate results. Moreover, a psychophysical experiment was designed to parameterize our model. Experimental results show that the proposed model is consistent with the human visual system. Compared with the other JND profiles, the proposed model can tolerate more distortion and has much better perceptual quality. The proposed JND model can be easily applied in many related areas, such as compression, watermarking, error protection, perceptual distortion metric, and so on.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, a DCT based JND model for monochrome pictures is proposed. This model incorporates the spatial contrast sensitivity function (CSF), the luminance adaptation effect and the contrast masking effect based on block classification. Gamma correction is also considered to compensate the original luminance adaptation effect which gives more accurate results. Moreover, a psychophysical experiment was designed to parameterize our model. Experimental results show that the proposed model is consistent with the human visual system. Compared with the other JND profiles, the proposed model can tolerate more distortion and has much better perceptual quality. The proposed JND model can be easily applied in many related areas, such as compression, watermarking, error protection, perceptual distortion metric, and so on.",
"fno": "04607587",
"keywords": [
"Discrete Cosine Transforms",
"Distortion",
"Image Classification",
"Image Resolution",
"Visual Perception",
"Image Spatial Just Noticeable Distortion Profile",
"DCT Domain",
"Spatial Contrast Sensitivity Function",
"Luminance Adaptation Effect",
"Contrast Masking Effect",
"Block Classification",
"Gamma Correction",
"Human Visual System",
"Perceptual Quality",
"Discrete Cosine Transforms",
"Adaptation Model",
"Pixel",
"Visualization",
"Sensitivity",
"Visual System",
"Brightness",
"Contrast Sensitivity Function CSF",
"Gamma Correction",
"Just Noticeable Distortion JND"
],
"authors": [
{
"affiliation": "The Chinese University of Hong Kong, China",
"fullName": "Zhenyu Wei",
"givenName": null,
"surname": "Zhenyu Wei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Chinese University of Hong Kong, China",
"fullName": "King N. Ngan",
"givenName": "King N.",
"surname": "Ngan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-06-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1945-7871",
"isbn": "978-1-4244-2570-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04607586",
"articleId": "12OmNzgNY0z",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04607588",
"articleId": "12OmNwErpSo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2004/8484/3/01326642",
"title": "Spatial selectivity modulated just-noticeable-distortion profile for video",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326642/12OmNAY79iu",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/3",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583897",
"title": "Temporal color Just Noticeable Distortion model and its application for video coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583897/12OmNC3Xhyw",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2008/3493/0/3493a260",
"title": "Just Noticeable Distortion Model and Its Application in Color Image Watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2008/3493a260/12OmNvTjZUm",
"parentPublication": {
"id": "proceedings/sitis/2008/3493/0",
"title": "2008 IEEE International Conference on Signal Image Technology and Internet Based Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583164",
"title": "Combined just noticeable difference model guided image watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583164/12OmNzWfp5c",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2013/2171/0/06746802",
"title": "A JND Profile Based on Hierarchically Selective Attention for Images",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2013/06746802/12OmNznkK1P",
"parentPublication": {
"id": "proceedings/ism/2013/2171/0",
"title": "2013 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07352354",
"title": "Just Noticeable Distortion Profile for Flat-Shaded 3D Mesh Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07352354/13rRUwhHcJn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551562",
"title": "Depth Masking Based Binocular Just-Noticeable-Distortion Model",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551562/17D45WaTknQ",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cniot/2022/6910/0/691000a146",
"title": "A Novel DCT-based Just Noticeable Difference Model for Videos Based on Structure Complexity",
"doi": null,
"abstractUrl": "/proceedings-article/cniot/2022/691000a146/1EOEenLEmm4",
"parentPublication": {
"id": "proceedings/cniot/2022/6910/0",
"title": "2022 3rd International Conference on Computing, Networks and Internet of Things (CNIOT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2020/1485/0/09105999",
"title": "Unsupervised Deep Learning for Just Noticeable Difference Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2020/09105999/1kwqFoSN4Aw",
"parentPublication": {
"id": "proceedings/icmew/2020/1485/0",
"title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552881",
"title": "Modeling Just Noticeable Differences in Charts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKir3",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WaTknQ",
"doi": "10.1109/ICMEW.2018.8551562",
"title": "Depth Masking Based Binocular Just-Noticeable-Distortion Model",
"normalizedTitle": "Depth Masking Based Binocular Just-Noticeable-Distortion Model",
"abstract": "2-D Just-Noticeable-Distortion (JND) models based on monocular vision properties are used to measure the just imperceptiule distortion threshold of an image. When a pair of stereoscopic images is presented to left and right eyes respectively, some uinocular vision properties begin to work, such as uinocular combination and rivalry. Zhao etc. proposed a uinocular JND (BJND) model in consideration of asymmetric noises in a pair of stereoscopic images. However, they only focused on the uinocular combination property and ignored the depth perception which is an essential binocular vision property. In this paper, we conduct psychophysical experiments to fmd how the disparity impact on depth masking. The results show that more noise could not be noticed under the optimized BJND model due to depth masking effect. The farther away the noises are, the less easy they are perceived. The optimized BJND model has been closer to the human visual perception.",
"abstracts": [
{
"abstractType": "Regular",
"content": "2-D Just-Noticeable-Distortion (JND) models based on monocular vision properties are used to measure the just imperceptiule distortion threshold of an image. When a pair of stereoscopic images is presented to left and right eyes respectively, some uinocular vision properties begin to work, such as uinocular combination and rivalry. Zhao etc. proposed a uinocular JND (BJND) model in consideration of asymmetric noises in a pair of stereoscopic images. However, they only focused on the uinocular combination property and ignored the depth perception which is an essential binocular vision property. In this paper, we conduct psychophysical experiments to fmd how the disparity impact on depth masking. The results show that more noise could not be noticed under the optimized BJND model due to depth masking effect. The farther away the noises are, the less easy they are perceived. The optimized BJND model has been closer to the human visual perception.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "2-D Just-Noticeable-Distortion (JND) models based on monocular vision properties are used to measure the just imperceptiule distortion threshold of an image. When a pair of stereoscopic images is presented to left and right eyes respectively, some uinocular vision properties begin to work, such as uinocular combination and rivalry. Zhao etc. proposed a uinocular JND (BJND) model in consideration of asymmetric noises in a pair of stereoscopic images. However, they only focused on the uinocular combination property and ignored the depth perception which is an essential binocular vision property. In this paper, we conduct psychophysical experiments to fmd how the disparity impact on depth masking. The results show that more noise could not be noticed under the optimized BJND model due to depth masking effect. The farther away the noises are, the less easy they are perceived. The optimized BJND model has been closer to the human visual perception.",
"fno": "08551562",
"keywords": [
"Depth Masking",
"BJND Model",
"Disparity",
"Binocular Suppression"
],
"authors": [
{
"affiliation": "School of Information Engineering, Communication University of China",
"fullName": "Kai Zheng",
"givenName": "Kai",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Information Engineering, Communication University of China",
"fullName": "Yana Zhang",
"givenName": "Yana",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Patent Examination Cooperation Sichuan Center of the Patent Office, SIPO",
"fullName": "Lingling Lv",
"givenName": "Lingling",
"surname": "Lv",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Information Engineering, Communication University of China",
"fullName": "Cheng Yang",
"givenName": "Cheng",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-5",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-4195-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08551499",
"articleId": "17D45WB0qdb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08551492",
"articleId": "17D45XDIXOS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2004/8484/3/01326642",
"title": "Spatial selectivity modulated just-noticeable-distortion profile for video",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326642/12OmNAY79iu",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/3",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583897",
"title": "Temporal color Just Noticeable Distortion model and its application for video coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583897/12OmNC3Xhyw",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2008/3493/0/3493a260",
"title": "Just Noticeable Distortion Model and Its Application in Color Image Watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2008/3493a260/12OmNvTjZUm",
"parentPublication": {
"id": "proceedings/sitis/2008/3493/0",
"title": "2008 IEEE International Conference on Signal Image Technology and Internet Based Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583164",
"title": "Combined just noticeable difference model guided image watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583164/12OmNzWfp5c",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786253",
"title": "Just Noticeable Difference Based Fast Coding Unit Partition in 3D-HEVC Intra Coding",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786253/12OmNzZEAp6",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607587",
"title": "Spatial just noticeable distortion profile for image in DCT domain",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607587/12OmNzxgHuG",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07352354",
"title": "Just Noticeable Distortion Profile for Flat-Shaded 3D Mesh Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07352354/13rRUwhHcJn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cniot/2022/6910/0/691000a146",
"title": "A Novel DCT-based Just Noticeable Difference Model for Videos Based on Structure Complexity",
"doi": null,
"abstractUrl": "/proceedings-article/cniot/2022/691000a146/1EOEenLEmm4",
"parentPublication": {
"id": "proceedings/cniot/2022/6910/0",
"title": "2022 3rd International Conference on Computing, Networks and Internet of Things (CNIOT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2020/1485/0/09105999",
"title": "Unsupervised Deep Learning for Just Noticeable Difference Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2020/09105999/1kwqFoSN4Aw",
"parentPublication": {
"id": "proceedings/icmew/2020/1485/0",
"title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552881",
"title": "Modeling Just Noticeable Differences in Charts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1EOEbNrpskM",
"title": "2022 3rd International Conference on Computing, Networks and Internet of Things (CNIOT)",
"acronym": "cniot",
"groupId": "1847067",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1EOEenLEmm4",
"doi": "10.1109/CNIOT55862.2022.00033",
"title": "A Novel DCT-based Just Noticeable Difference Model for Videos Based on Structure Complexity",
"normalizedTitle": "A Novel DCT-based Just Noticeable Difference Model for Videos Based on Structure Complexity",
"abstract": "The Just Noticeable Difference (JND) model involves the minimum level of visibility where visual content can be distinguished, which plays a significant role in terms of the perceptual image/video. For the estimation of the JND threshold, the contrast masking effect evaluation is a critical task and has room for manoeuvre. Considering the important role of structural information for contrast masking evaluation, a structure complexity descriptor based on orientation selectivity characteristics of the human visual system (HVS) was introduced and a new contrast masking model based on structure complexity in the discrete cosine transformation (DCT) domain was estimated. Then, combining with the spatio-temporal CSF and luminance adaptation, a novel JND model for videos was proposed in the DCT domain. The experimental results of the subjective quality evaluation tests demonstrate that the proposed JND threshold can hide more noises under the same perceived quality, which is highly consistent with human subjective visual perception.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Just Noticeable Difference (JND) model involves the minimum level of visibility where visual content can be distinguished, which plays a significant role in terms of the perceptual image/video. For the estimation of the JND threshold, the contrast masking effect evaluation is a critical task and has room for manoeuvre. Considering the important role of structural information for contrast masking evaluation, a structure complexity descriptor based on orientation selectivity characteristics of the human visual system (HVS) was introduced and a new contrast masking model based on structure complexity in the discrete cosine transformation (DCT) domain was estimated. Then, combining with the spatio-temporal CSF and luminance adaptation, a novel JND model for videos was proposed in the DCT domain. The experimental results of the subjective quality evaluation tests demonstrate that the proposed JND threshold can hide more noises under the same perceived quality, which is highly consistent with human subjective visual perception.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Just Noticeable Difference (JND) model involves the minimum level of visibility where visual content can be distinguished, which plays a significant role in terms of the perceptual image/video. For the estimation of the JND threshold, the contrast masking effect evaluation is a critical task and has room for manoeuvre. Considering the important role of structural information for contrast masking evaluation, a structure complexity descriptor based on orientation selectivity characteristics of the human visual system (HVS) was introduced and a new contrast masking model based on structure complexity in the discrete cosine transformation (DCT) domain was estimated. Then, combining with the spatio-temporal CSF and luminance adaptation, a novel JND model for videos was proposed in the DCT domain. The experimental results of the subjective quality evaluation tests demonstrate that the proposed JND threshold can hide more noises under the same perceived quality, which is highly consistent with human subjective visual perception.",
"fno": "691000a146",
"keywords": [
"Brightness",
"Data Compression",
"Discrete Cosine Transforms",
"Image Coding",
"Image Segmentation",
"Image Texture",
"Spatiotemporal Phenomena",
"Visual Perception",
"Novel DCT Based Just Noticeable Difference Model",
"Visual Content",
"JND Threshold",
"Effect Evaluation",
"Structural Information",
"Contrast Masking Evaluation",
"Structure Complexity Descriptor",
"Orientation Selectivity Characteristics",
"Human Visual System",
"Contrast Masking Model",
"Discrete Cosine Transformation Domain",
"Luminance Adaptation",
"JND Model",
"DCT Domain",
"Subjective Quality Evaluation Tests",
"Human Subjective Visual Perception",
"Adaptation Models",
"Visualization",
"Computational Modeling",
"Estimation",
"Complexity Theory",
"Discrete Cosine Transforms",
"Internet Of Things",
"Just Noticeable Difference JND",
"Contrast Masking CM",
"Structure Complexity",
"Orientation Selectivity Mechanism OSM",
"Human Visual System HVS"
],
"authors": [
{
"affiliation": "School of Information, Technology and Engineering,Guangzhou College of Commerce,Guangzhou,China",
"fullName": "Hanxiao Xue",
"givenName": "Hanxiao",
"surname": "Xue",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research Institute,Electronic Product Relibility and Environmental Testing,Guangzhou,China",
"fullName": "Wenfei Wan",
"givenName": "Wenfei",
"surname": "Wan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research Institute,Electronic Product Relibility and Environmental Testing,Guangzhou,China",
"fullName": "Shengyu Wei",
"givenName": "Shengyu",
"surname": "Wei",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cniot",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-05-01T00:00:00",
"pubType": "proceedings",
"pages": "146-150",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6910-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "691000a140",
"articleId": "1EOEhGHKFO0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "691000a151",
"articleId": "1EOEfk3WNzy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2010/7491/0/05583897",
"title": "Temporal color Just Noticeable Distortion model and its application for video coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583897/12OmNC3Xhyw",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2008/3493/0/3493a260",
"title": "Just Noticeable Distortion Model and Its Application in Color Image Watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2008/3493a260/12OmNvTjZUm",
"parentPublication": {
"id": "proceedings/sitis/2008/3493/0",
"title": "2008 IEEE International Conference on Signal Image Technology and Internet Based Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1994/5637/0/00305944",
"title": "Visibility of DCT basis functions: effects of contrast masking",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1994/00305944/12OmNwx3Q6S",
"parentPublication": {
"id": "proceedings/dcc/1994/5637/0",
"title": "Proceedings of IEEE Data Compression Conference (DCC'94)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479908",
"title": "Passive and Active Kinesthetic Perception Just-noticeable-difference for Natural Frequency of Virtual Dynamic Systems",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479908/12OmNxA3Z3K",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583164",
"title": "Combined just noticeable difference model guided image watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583164/12OmNzWfp5c",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607587",
"title": "Spatial just noticeable distortion profile for image in DCT domain",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607587/12OmNzxgHuG",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07352354",
"title": "Just Noticeable Distortion Profile for Flat-Shaded 3D Mesh Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07352354/13rRUwhHcJn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551562",
"title": "Depth Masking Based Binocular Just-Noticeable-Distortion Model",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551562/17D45WaTknQ",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2020/1485/0/09105999",
"title": "Unsupervised Deep Learning for Just Noticeable Difference Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2020/09105999/1kwqFoSN4Aw",
"parentPublication": {
"id": "proceedings/icmew/2020/1485/0",
"title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552881",
"title": "Modeling Just Noticeable Differences in Charts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1kwqyDCYmas",
"title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1kwqFoSN4Aw",
"doi": "10.1109/ICMEW46912.2020.9105999",
"title": "Unsupervised Deep Learning for Just Noticeable Difference Estimation",
"normalizedTitle": "Unsupervised Deep Learning for Just Noticeable Difference Estimation",
"abstract": "Just noticeable difference (JND) estimates the visual redundancies of the human visual system (HVS), which has been widely applied in perceptual redundancy estimations in images and videos. Existing handcrafted feature based JND models are always inspired by some kind of HVS mechanisms, and have a limited performance for JND threshold estimation. Recently, deep learning has been widely used in various visual tasks and achieved notable success. However, deep learning is difficult to be applied for JND estimation, since it is impossible to build a large-scale dataset with pixellevel-label for training. In this paper, we propose an unsupervised learning based JND model. The underlying idea is to learn the visual redundancy characteristics of HVS by convolutional neural networks (CNN) without labeled data. Specifially, in order to optimize the parameters of the proposed model, three types of prior knowledge, i.e., the image quality, the pattern complexity, and the noise masking ability, are used to estimate the JND map of an image. Experiments demonstrate that the proposed model is highly consistent with the HVS and outperforms the existing JND models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Just noticeable difference (JND) estimates the visual redundancies of the human visual system (HVS), which has been widely applied in perceptual redundancy estimations in images and videos. Existing handcrafted feature based JND models are always inspired by some kind of HVS mechanisms, and have a limited performance for JND threshold estimation. Recently, deep learning has been widely used in various visual tasks and achieved notable success. However, deep learning is difficult to be applied for JND estimation, since it is impossible to build a large-scale dataset with pixellevel-label for training. In this paper, we propose an unsupervised learning based JND model. The underlying idea is to learn the visual redundancy characteristics of HVS by convolutional neural networks (CNN) without labeled data. Specifially, in order to optimize the parameters of the proposed model, three types of prior knowledge, i.e., the image quality, the pattern complexity, and the noise masking ability, are used to estimate the JND map of an image. Experiments demonstrate that the proposed model is highly consistent with the HVS and outperforms the existing JND models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Just noticeable difference (JND) estimates the visual redundancies of the human visual system (HVS), which has been widely applied in perceptual redundancy estimations in images and videos. Existing handcrafted feature based JND models are always inspired by some kind of HVS mechanisms, and have a limited performance for JND threshold estimation. Recently, deep learning has been widely used in various visual tasks and achieved notable success. However, deep learning is difficult to be applied for JND estimation, since it is impossible to build a large-scale dataset with pixellevel-label for training. In this paper, we propose an unsupervised learning based JND model. The underlying idea is to learn the visual redundancy characteristics of HVS by convolutional neural networks (CNN) without labeled data. Specifially, in order to optimize the parameters of the proposed model, three types of prior knowledge, i.e., the image quality, the pattern complexity, and the noise masking ability, are used to estimate the JND map of an image. Experiments demonstrate that the proposed model is highly consistent with the HVS and outperforms the existing JND models.",
"fno": "09105999",
"keywords": [
"Convolutional Neural Nets",
"Data Compression",
"Image Recognition",
"Unsupervised Learning",
"Video Coding",
"Visual Perception",
"Unsupervised Deep Learning",
"Noticeable Difference Estimation",
"Human Visual System",
"Perceptual Redundancy Estimations",
"JND Model",
"HVS Mechanisms",
"JND Threshold Estimation",
"Visual Redundancy Characteristics",
"Image Quality",
"JND Map",
"Convolutional Neural Networks",
"CNN",
"Pattern Complexity",
"Noise Masking Ability",
"Deep Learning",
"Training",
"Visualization",
"Redundancy",
"Estimation",
"Visual Systems",
"Complexity Theory",
"Just Noticeable Difference JND",
"Human Visual System HVS",
"Convolutional Neural Networks CNN"
],
"authors": [
{
"affiliation": "Xidian University,School of Artificial Intelligence,Shaanxi,China",
"fullName": "Yuhao Wu",
"givenName": "Yuhao",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xidian University,School of Artificial Intelligence,Shaanxi,China",
"fullName": "Weiping Ji",
"givenName": "Weiping",
"surname": "Ji",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xidian University,School of Artificial Intelligence,Shaanxi,China",
"fullName": "Jinjian Wu",
"givenName": "Jinjian",
"surname": "Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-1485-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09105955",
"articleId": "1kwqEcvjNqo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09106031",
"articleId": "1kwqzpVU3za",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2004/8484/3/01326642",
"title": "Spatial selectivity modulated just-noticeable-distortion profile for video",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326642/12OmNAY79iu",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/3",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583897",
"title": "Temporal color Just Noticeable Distortion model and its application for video coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583897/12OmNC3Xhyw",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2008/3493/0/3493a260",
"title": "Just Noticeable Distortion Model and Its Application in Color Image Watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2008/3493a260/12OmNvTjZUm",
"parentPublication": {
"id": "proceedings/sitis/2008/3493/0",
"title": "2008 IEEE International Conference on Signal Image Technology and Internet Based Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479908",
"title": "Passive and Active Kinesthetic Perception Just-noticeable-difference for Natural Frequency of Virtual Dynamic Systems",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479908/12OmNxA3Z3K",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583164",
"title": "Combined just noticeable difference model guided image watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583164/12OmNzWfp5c",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786253",
"title": "Just Noticeable Difference Based Fast Coding Unit Partition in 3D-HEVC Intra Coding",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786253/12OmNzZEAp6",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607587",
"title": "Spatial just noticeable distortion profile for image in DCT domain",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607587/12OmNzxgHuG",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cniot/2022/6910/0/691000a146",
"title": "A Novel DCT-based Just Noticeable Difference Model for Videos Based on Structure Complexity",
"doi": null,
"abstractUrl": "/proceedings-article/cniot/2022/691000a146/1EOEenLEmm4",
"parentPublication": {
"id": "proceedings/cniot/2022/6910/0",
"title": "2022 3rd International Conference on Computing, Networks and Internet of Things (CNIOT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a001",
"title": "Estimating the Just Noticeable Difference of Tactile Feedback in Oculus Quest 2 Controllers",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a001/1JrRdMd6OZi",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552881",
"title": "Modeling Just Noticeable Differences in Charts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx6g6nR",
"title": "2015 International Conference on Computer Science and Applications (CSA)",
"acronym": "csa",
"groupId": "1803775",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC4O4Fc",
"doi": "10.1109/CSA.2015.14",
"title": "Hand Segmentation Based on Improved Gaussian Mixture Model",
"normalizedTitle": "Hand Segmentation Based on Improved Gaussian Mixture Model",
"abstract": "In the process of human computer interaction, hand tracking and hand gesture recognition are of great importance. Hand segmentation is the first step of hand gesture recognition. Among several common methods, the background subtraction method is chosen for detecting moving hands. An improved Gaussian mixture model is used to establish the background model. In order to adapt to a changing scene, parameters of the background model are updated in real time by implementing an on-line K-means approximation. Then, the moving hands are segmented by the preset threshold. Experimental results demonstrate that the proposed method can segment hand region in a complex background. The proposed method can be used in the fields of human computer interaction and augmented reality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the process of human computer interaction, hand tracking and hand gesture recognition are of great importance. Hand segmentation is the first step of hand gesture recognition. Among several common methods, the background subtraction method is chosen for detecting moving hands. An improved Gaussian mixture model is used to establish the background model. In order to adapt to a changing scene, parameters of the background model are updated in real time by implementing an on-line K-means approximation. Then, the moving hands are segmented by the preset threshold. Experimental results demonstrate that the proposed method can segment hand region in a complex background. The proposed method can be used in the fields of human computer interaction and augmented reality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the process of human computer interaction, hand tracking and hand gesture recognition are of great importance. Hand segmentation is the first step of hand gesture recognition. Among several common methods, the background subtraction method is chosen for detecting moving hands. An improved Gaussian mixture model is used to establish the background model. In order to adapt to a changing scene, parameters of the background model are updated in real time by implementing an on-line K-means approximation. Then, the moving hands are segmented by the preset threshold. Experimental results demonstrate that the proposed method can segment hand region in a complex background. The proposed method can be used in the fields of human computer interaction and augmented reality.",
"fno": "9961a168",
"keywords": [
"Gaussian Distribution",
"Gaussian Mixture Model",
"Computational Modeling",
"Image Segmentation",
"Adaptation Models",
"Mixture Models",
"Background Modeling",
"Hand Segmentation",
"Gaussian Mixture Model",
"Moving Object Detection",
"Background Subtraction"
],
"authors": [
{
"affiliation": null,
"fullName": "Yi Zheng",
"givenName": "Yi",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ping Zheng",
"givenName": "Ping",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "csa",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-11-01T00:00:00",
"pubType": "proceedings",
"pages": "168-171",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-9961-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "9961a163",
"articleId": "12OmNBqMDBW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "9961a172",
"articleId": "12OmNxisQQQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cicsyn/2011/4482/0/4482a231",
"title": "A Comparative Analysis of Segmentation Algorithms for Hand Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cicsyn/2011/4482a231/12OmNBOlloL",
"parentPublication": {
"id": "proceedings/cicsyn/2011/4482/0",
"title": "Computational Intelligence, Communication Systems and Networks, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2009/3852/0/3852a323",
"title": "Player Detection Algorithm Based on Gaussian Mixture Models Background Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2009/3852a323/12OmNCd2rFh",
"parentPublication": {
"id": "proceedings/icinis/2009/3852/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2008/3381/0/3381a730",
"title": "An Improved Mixture Gaussian Models to Detect Moving Object Under Real-Time Complex Background",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2008/3381a730/12OmNs4S8BI",
"parentPublication": {
"id": "proceedings/cw/2008/3381/0",
"title": "2008 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a553",
"title": "Hand Detection Using Robust Color Correction and Gaussian Mixture Model",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a553/12OmNvrdI6d",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2017/3581/0/3581a704",
"title": "A Comparison Between Different Gaussian-Based Mixture Models",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2017/3581a704/12OmNwLOYWp",
"parentPublication": {
"id": "proceedings/aiccsa/2017/3581/0",
"title": "2017 IEEE/ACS 14th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mines/2011/4559/0/4559a046",
"title": "The Improved Gaussian Mixture Model Based on Motion Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/mines/2011/4559a046/12OmNxWcH2w",
"parentPublication": {
"id": "proceedings/mines/2011/4559/0",
"title": "Multimedia Information Networking and Security, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2018/2659/0/265901a300",
"title": "Speaker Verification Using Adapted Bounded Gaussian Mixture Model",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2018/265901a300/12OmNxeutgm",
"parentPublication": {
"id": "proceedings/iri/2018/2659/0",
"title": "2018 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcabes/2015/6593/0/6593a340",
"title": "A Foreground-Background Segmentation Algorithm for Video Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/dcabes/2015/6593a340/12OmNxw5B36",
"parentPublication": {
"id": "proceedings/dcabes/2015/6593/0",
"title": "2015 14th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aici/2010/4225/2/4225b054",
"title": "A Robust Moving Objects Detection Based on Improved Gaussian Mixture Model",
"doi": null,
"abstractUrl": "/proceedings-article/aici/2010/4225b054/12OmNyNQSBd",
"parentPublication": {
"id": "proceedings/aici/2010/4225/2",
"title": "Artificial Intelligence and Computational Intelligence, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mediacom/2010/4136/0/4136a090",
"title": "An Improved Foreground Object Detection Method Based on Gaussian Mixture Models",
"doi": null,
"abstractUrl": "/proceedings-article/mediacom/2010/4136a090/12OmNzd7bE0",
"parentPublication": {
"id": "proceedings/mediacom/2010/4136/0",
"title": "2010 International Conference on Multimedia Communications (Mediacom 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrAdstl",
"title": "Computer Science and Engineering, International Workshop on",
"acronym": "iwcse",
"groupId": "1003091",
"volume": "1",
"displayVolume": "1",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC4wtAI",
"doi": "10.1109/WCSE.2009.741",
"title": "Salient Posture Modeling Based on Spatio-temporal Interesting Points",
"normalizedTitle": "Salient Posture Modeling Based on Spatio-temporal Interesting Points",
"abstract": "An action can be represented as a sequence of salient postures. Effective modeling of the salient postures is critical for robust action recognition. This paper proposes to characterize the salient postures using a set of the spatio-temporal interesting points (STIPs). Local features are extracted at each STIP and the statistical distribution of the features for each salient posture is further modelled by a Gaussian mixture model (GMM). Experimental results have verified the effectiveness of the proposed posture model.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An action can be represented as a sequence of salient postures. Effective modeling of the salient postures is critical for robust action recognition. This paper proposes to characterize the salient postures using a set of the spatio-temporal interesting points (STIPs). Local features are extracted at each STIP and the statistical distribution of the features for each salient posture is further modelled by a Gaussian mixture model (GMM). Experimental results have verified the effectiveness of the proposed posture model.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An action can be represented as a sequence of salient postures. Effective modeling of the salient postures is critical for robust action recognition. This paper proposes to characterize the salient postures using a set of the spatio-temporal interesting points (STIPs). Local features are extracted at each STIP and the statistical distribution of the features for each salient posture is further modelled by a Gaussian mixture model (GMM). Experimental results have verified the effectiveness of the proposed posture model.",
"fno": "3881a604",
"keywords": [
"Posture Modeling",
"STIP",
"GMM",
"Posture Index"
],
"authors": [
{
"affiliation": null,
"fullName": "Chuan-xu Wang",
"givenName": "Chuan-xu",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yun Liu",
"givenName": "Yun",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wanqing Li",
"givenName": "Wanqing",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iwcse",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-10-01T00:00:00",
"pubType": "proceedings",
"pages": "604-607",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3881-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3881a599",
"articleId": "12OmNxGSm1E",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3881a608",
"articleId": "12OmNzEmFEl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccet/2009/3521/1/3521a466",
"title": "Non-temporal Mutliple Silhouettes in Hidden Markov Model for View Independent Posture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iccet/2009/3521a466/12OmNANkofU",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ie/2011/4452/0/4452a047",
"title": "Accelerometer Placement for Posture Recognition and Fall Detection",
"doi": null,
"abstractUrl": "/proceedings-article/ie/2011/4452a047/12OmNAQJzLf",
"parentPublication": {
"id": "proceedings/ie/2011/4452/0",
"title": "Intelligent Environments, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isms/2010/3973/0/3973a209",
"title": "On the Use of Decision Tree for Posture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/isms/2010/3973a209/12OmNAle6I6",
"parentPublication": {
"id": "proceedings/isms/2010/3973/0",
"title": "Intelligent Systems, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2002/1602/0/16020294",
"title": "A PCA/MDA Scheme for Hand Posture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2002/16020294/12OmNAlvHRj",
"parentPublication": {
"id": "proceedings/fg/2002/1602/0",
"title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mvhi/2010/4009/0/4009a683",
"title": "Real-Time Hand Posture Recognition Using Haar-Like and Topological Feature",
"doi": null,
"abstractUrl": "/proceedings-article/mvhi/2010/4009a683/12OmNBDgZ3e",
"parentPublication": {
"id": "proceedings/mvhi/2010/4009/0",
"title": "Machine Vision and Human-machine Interface, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2002/1602/0/16020077",
"title": "Recognition of Human Body Posture from a Cloud of 3D Data Points using Wavelet Transform Coefficients",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2002/16020077/12OmNBbJTqo",
"parentPublication": {
"id": "proceedings/fg/2002/1602/0",
"title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1994/6240/0/00324011",
"title": "Posture interpolation with collision avoidance",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1994/00324011/12OmNCdk2A0",
"parentPublication": {
"id": "proceedings/ca/1994/6240/0",
"title": "Proceedings of Computer Animation '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2012/4779/0/4779a588",
"title": "Posture Recognition with G-Sensors on Smart Phones",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2012/4779a588/12OmNwE9OIE",
"parentPublication": {
"id": "proceedings/nbis/2012/4779/0",
"title": "2012 15th International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d854",
"title": "Real-Time Upper-Limbs Posture Recognition Based on Particle Filters and AdaBoost Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d854/12OmNx57HP1",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543273",
"title": "Action recognition based on a bag of 3D points",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543273/12OmNzUxOhw",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAkEU4g",
"title": "2014 Ninth International Conference on Broadband and Wireless Computing, Communication and Applications (BWCCA)",
"acronym": "bwcca",
"groupId": "1800183",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC943Nw",
"doi": "10.1109/BWCCA.2014.43",
"title": "Gaussian Process Machine Learning Based ITO Algorithm",
"normalizedTitle": "Gaussian Process Machine Learning Based ITO Algorithm",
"abstract": "Taking the Gaussian process (GP) regression model as ITO's fluctuation operator, we propose a new mixed algorithm called GITO in order to overcome the local minima problem. Through learning the particles' mobility models, ITO's capacity of local searching and global searching is strengthened. Meanwhile, we give the proof procedure to verify ITO's fluctuation operator and GP are logically equivalent. Finally, the experiments show GITO's better convergence rate and performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Taking the Gaussian process (GP) regression model as ITO's fluctuation operator, we propose a new mixed algorithm called GITO in order to overcome the local minima problem. Through learning the particles' mobility models, ITO's capacity of local searching and global searching is strengthened. Meanwhile, we give the proof procedure to verify ITO's fluctuation operator and GP are logically equivalent. Finally, the experiments show GITO's better convergence rate and performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Taking the Gaussian process (GP) regression model as ITO's fluctuation operator, we propose a new mixed algorithm called GITO in order to overcome the local minima problem. Through learning the particles' mobility models, ITO's capacity of local searching and global searching is strengthened. Meanwhile, we give the proof procedure to verify ITO's fluctuation operator and GP are logically equivalent. Finally, the experiments show GITO's better convergence rate and performance.",
"fno": "4173a038",
"keywords": [
"Gaussian Processes",
"Learning Artificial Intelligence",
"Regression Analysis",
"Search Problems",
"Proof Procedure",
"ITO Global Searching Capacity",
"ITO Local Searching Capacity",
"Particle Mobility Models",
"Local Minima Problem",
"GITO",
"Mixed Algorithm",
"ITO Fluctuation Operator",
"GP Regression Model",
"Gaussian Process Regression Model",
"Gaussian Process Machine Learning Based ITO Algorithm",
"Indium Tin Oxide",
"Algorithm Design And Analysis",
"Gaussian Processes",
"Heuristic Algorithms",
"Convergence",
"Computational Modeling",
"Adaptation Models",
"Gaussian Process",
"ITO",
"Fluctuation Ratio",
"Incremental Inheritance",
"Category Theory"
],
"authors": [
{
"affiliation": "Coll. of Comput. Sci. & Technol., Jilin Univ., Changchun, China",
"fullName": "Chuang Ma",
"givenName": "Chuang",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coll. of Comput. Sci. & Technol., Jilin Univ., Changchun, China",
"fullName": "Yongjian Yang",
"givenName": "Yongjian",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coll. of Comput. Sci. & Technol., Jilin Univ., Changchun, China",
"fullName": "Zhanwei Du",
"givenName": "Zhanwei",
"surname": "Du",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coll. of Manage. Sci. & Inf. Eng., Jilin Univ. of Finance & Econ., Changchun, China",
"fullName": "Chijun Zhang",
"givenName": "Chijun",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bwcca",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-11-01T00:00:00",
"pubType": "proceedings",
"pages": "38-41",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4173-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4173a034",
"articleId": "12OmNscxj8Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4173a042",
"articleId": "12OmNzIl3DP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2011/0394/0/05995688",
"title": "Generalized Gaussian process models",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995688/12OmNx0RIR4",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/1/05743877",
"title": "Efficient reduction of Gaussian components using MDL criterion for HMM-based speech recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05743877/12OmNx965BP",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/1",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cinc/2009/3645/2/3645b174",
"title": "Accelerating Particle Swarm Optimization Algorithms Using Gaussian Process Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cinc/2009/3645b174/12OmNxxvAR5",
"parentPublication": {
"id": "cinc/2009/3645/2",
"title": "Computational Intelligence and Natural Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sensordevices/2010/4094/0/4094a130",
"title": "ITO Thin Films by RF Sputtering for Ethanol Sensing",
"doi": null,
"abstractUrl": "/proceedings-article/sensordevices/2010/4094a130/12OmNy3RRvV",
"parentPublication": {
"id": "proceedings/sensordevices/2010/4094/0",
"title": "Sensor Device Technologies and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2016/2312/0/2312a085",
"title": "An Improved Fast Self-Comparison Algorithm for High-Speed Defect Detection of ITO Circuits",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a085/12OmNy6HQS5",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2013/4892/0/4892e456",
"title": "IT Application Outsourcing in Europe: Long-Term Outcomes, Success Factors and Implications for ITO Maturity",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892e456/12OmNyjccBG",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f580",
"title": "Wrapped Gaussian Process Regression on Riemannian Manifolds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f580/17D45Xbl4Pl",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2022/9744/0/974400a042",
"title": "Transfer Learning for Regression through Adaptive Gaussian Process",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2022/974400a042/1MrFW4sFgME",
"parentPublication": {
"id": "proceedings/ictai/2022/9744/0",
"title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ises/2018/9172/0/917200a187",
"title": "Characterization of Thin Zirconia Films Deposited by ECD on ITO Coated Glass for Biosensing Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ises/2018/917200a187/1ap5cHo049y",
"parentPublication": {
"id": "proceedings/ises/2018/9172/0",
"title": "2018 IEEE International Symposium on Smart Electronic Systems (iSES) (Formerly iNiS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asew/2020/8128/0/812800a112",
"title": "NLP-based Enhancement of Information Security in ITO - A Diffusion of Innovation Theory perspective",
"doi": null,
"abstractUrl": "/proceedings-article/asew/2020/812800a112/1qyxMOkNxXW",
"parentPublication": {
"id": "proceedings/asew/2020/8128/0",
"title": "2020 35th IEEE/ACM International Conference on Automated Software Engineering Workshops (ASEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy2agRS",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"acronym": "cad-graphics",
"groupId": "1001488",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwF0BZ5",
"doi": "10.1109/CADGraphics.2013.32",
"title": "Dynamic Human Surface Reconstruction Using a Single Kinect",
"normalizedTitle": "Dynamic Human Surface Reconstruction Using a Single Kinect",
"abstract": "This paper presents a system for robust dynamic human surface reconstruction using a single Kinect. The single Kinect provides a self-occluded and noisy RGBD data. Thus it is challenging to track the whole human surface robustly. To overcome both incompleteness and data noise, we adopt a template to confine the shape in the un-seen part, and propose a two-stage tracking pipeline. The first stage tracks the articulated motion of the human, which improves robustness of tracking by introducing more constraints between the surface points. The second stage tracks movements of non-articulated motion. For long sequences, we stabilize the human surface in the un-seen part by directly warping the surface from the first frame to the current frame according to sequentially tracked correspondences, preventing surface from collapsing caused by error accumulation. We demonstrate our method by several real captured RGBD data, containing complex human motion. The reconstruction results show the effectiveness and robustness of our method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a system for robust dynamic human surface reconstruction using a single Kinect. The single Kinect provides a self-occluded and noisy RGBD data. Thus it is challenging to track the whole human surface robustly. To overcome both incompleteness and data noise, we adopt a template to confine the shape in the un-seen part, and propose a two-stage tracking pipeline. The first stage tracks the articulated motion of the human, which improves robustness of tracking by introducing more constraints between the surface points. The second stage tracks movements of non-articulated motion. For long sequences, we stabilize the human surface in the un-seen part by directly warping the surface from the first frame to the current frame according to sequentially tracked correspondences, preventing surface from collapsing caused by error accumulation. We demonstrate our method by several real captured RGBD data, containing complex human motion. The reconstruction results show the effectiveness and robustness of our method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a system for robust dynamic human surface reconstruction using a single Kinect. The single Kinect provides a self-occluded and noisy RGBD data. Thus it is challenging to track the whole human surface robustly. To overcome both incompleteness and data noise, we adopt a template to confine the shape in the un-seen part, and propose a two-stage tracking pipeline. The first stage tracks the articulated motion of the human, which improves robustness of tracking by introducing more constraints between the surface points. The second stage tracks movements of non-articulated motion. For long sequences, we stabilize the human surface in the un-seen part by directly warping the surface from the first frame to the current frame according to sequentially tracked correspondences, preventing surface from collapsing caused by error accumulation. We demonstrate our method by several real captured RGBD data, containing complex human motion. The reconstruction results show the effectiveness and robustness of our method.",
"fno": "06814995",
"keywords": [
"Shape",
"Surface Reconstruction",
"Robustness",
"Image Reconstruction",
"Dynamics",
"Target Tracking",
"Human Body",
"Kinect",
"Dynamic Reconstruction"
],
"authors": [
{
"affiliation": null,
"fullName": "Ming Zeng",
"givenName": "Ming",
"surname": "Zeng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jiaxiang Zheng",
"givenName": "Jiaxiang",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xuan Cheng",
"givenName": "Xuan",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bo Jiang",
"givenName": "Bo",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xinguo Liu",
"givenName": "Xinguo",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cad-graphics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-11-01T00:00:00",
"pubType": "proceedings",
"pages": "188-195",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2576-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06814994",
"articleId": "12OmNzgwmLw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06814996",
"articleId": "12OmNvT2p79",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2013/3022/0/3022a307",
"title": "Single-View RGBD-Based Reconstruction of Dynamic Human Geometry",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a307/12OmNAP1Z1Q",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a661",
"title": "RGBD Datasets: Past, Present and Future",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a661/12OmNApLGF0",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d083",
"title": "Robust Non-rigid Motion Tracking and Surface Reconstruction Using L0 Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d083/12OmNB9KHwl",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601a390",
"title": "Human Shape Capture and Tracking at Home",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601a390/12OmNvrMUhd",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2016/2305/0/2305a154",
"title": "Compositing Real and Synthetic Images: Using Kinect and Fisheye Camera",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2016/2305a154/12OmNzICEP2",
"parentPublication": {
"id": "proceedings/nicoint/2016/2305/0",
"title": "2016 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a910",
"title": "BodyFusion: Real-Time Capture of Human Motion and Surface Geometry Using a Single Depth Camera",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a910/12OmNzT7Otl",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000h287",
"title": "DoubleFusion: Real-Time Capture of Human Performances with Inner Body Shapes from a Single Depth Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000h287/17D45VsBTWf",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a012",
"title": "Surface Light Field Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a012/17D45WODasr",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08708933",
"title": "UnstructuredFusion: Realtime 4D Geometry and Texture Reconstruction Using Commercial RGBD Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08708933/19Q3hT6JyUg",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h738",
"title": "DeepHuman: 3D Human Reconstruction From a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h738/1hQqtWYyufS",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvnwVnh",
"title": "2017 IEEE/ACS 14th International Conference on Computer Systems and Applications (AICCSA)",
"acronym": "aiccsa",
"groupId": "1000146",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwLOYWp",
"doi": "10.1109/AICCSA.2017.108",
"title": "A Comparison Between Different Gaussian-Based Mixture Models",
"normalizedTitle": "A Comparison Between Different Gaussian-Based Mixture Models",
"abstract": "In this paper, we address the problem of data clustering into homogeneous components in an unsupervised way. Data clustering is one of the major topics in computer vision which has widespread potential applications from various domains such as pattern recognition, data mining, remote sensing, and bioinformatics. In pattern recognition, statistical methods have been widely used and proved effective in generating accurate models. In particular, the popular finite Gaussian mixture models which are able to provide superior performance for data clustering and classification. In this work, we present and evaluate the performance of four well-known Gaussian-based mixture models for data clustering namely: Gaussian mixture model (GMM), Generalized Gaussian mixture model (GGMM), Bounded Gaussian mixture model (BGMM) and Bounded Generalized Gaussian mixture model (BGGMM). The aim of this work is to show that the choice of the component model is very critical in mixture decomposition. Experimental results show close clustering accuracy between different models. However, the bounded generalized Gaussian mixture model provides the best performance in the case of multidimensional data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we address the problem of data clustering into homogeneous components in an unsupervised way. Data clustering is one of the major topics in computer vision which has widespread potential applications from various domains such as pattern recognition, data mining, remote sensing, and bioinformatics. In pattern recognition, statistical methods have been widely used and proved effective in generating accurate models. In particular, the popular finite Gaussian mixture models which are able to provide superior performance for data clustering and classification. In this work, we present and evaluate the performance of four well-known Gaussian-based mixture models for data clustering namely: Gaussian mixture model (GMM), Generalized Gaussian mixture model (GGMM), Bounded Gaussian mixture model (BGMM) and Bounded Generalized Gaussian mixture model (BGGMM). The aim of this work is to show that the choice of the component model is very critical in mixture decomposition. Experimental results show close clustering accuracy between different models. However, the bounded generalized Gaussian mixture model provides the best performance in the case of multidimensional data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we address the problem of data clustering into homogeneous components in an unsupervised way. Data clustering is one of the major topics in computer vision which has widespread potential applications from various domains such as pattern recognition, data mining, remote sensing, and bioinformatics. In pattern recognition, statistical methods have been widely used and proved effective in generating accurate models. In particular, the popular finite Gaussian mixture models which are able to provide superior performance for data clustering and classification. In this work, we present and evaluate the performance of four well-known Gaussian-based mixture models for data clustering namely: Gaussian mixture model (GMM), Generalized Gaussian mixture model (GGMM), Bounded Gaussian mixture model (BGMM) and Bounded Generalized Gaussian mixture model (BGGMM). The aim of this work is to show that the choice of the component model is very critical in mixture decomposition. Experimental results show close clustering accuracy between different models. However, the bounded generalized Gaussian mixture model provides the best performance in the case of multidimensional data.",
"fno": "3581a704",
"keywords": [
"Bayes Methods",
"Expectation Maximisation Algorithm",
"Gaussian Distribution",
"Gaussian Processes",
"Mixture Models",
"Pattern Clustering",
"Data Clustering",
"Component Model",
"Mixture Decomposition",
"Clustering Accuracy",
"Bounded Generalized Gaussian Mixture Model",
"Multidimensional Data",
"Finite Gaussian Mixture Models",
"Gaussian Mixture Model",
"Covariance Matrices",
"Data Models",
"Shape",
"Mixture Models",
"Gaussian Distribution",
"Gaussian Mixture Model",
"Data Clustering",
"Comparative Study",
"Expectation Maximization"
],
"authors": [
{
"affiliation": null,
"fullName": "Fatma Najar",
"givenName": "Fatma",
"surname": "Najar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sami Bourouis",
"givenName": "Sami",
"surname": "Bourouis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nizar Bouguila",
"givenName": "Nizar",
"surname": "Bouguila",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Safiya Belghith",
"givenName": "Safiya",
"surname": "Belghith",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aiccsa",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "704-708",
"year": "2017",
"issn": "2161-5330",
"isbn": "978-1-5386-3581-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3581a696",
"articleId": "12OmNwpoFz2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3581a709",
"articleId": "12OmNx8wTno",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2016/5473/0/07838000",
"title": "Gaussian Component Based Index for GMMs",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2016/07838000/12OmNAlvHTi",
"parentPublication": {
"id": "proceedings/icdm/2016/5473/0",
"title": "2016 IEEE 16th International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2016/3906/0/3906a587",
"title": "Maximum Gaussian Mixture Model for Classification",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2016/3906a587/12OmNwBT1mL",
"parentPublication": {
"id": "proceedings/itme/2016/3906/0",
"title": "2016 8th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2018/2659/0/265901a300",
"title": "Speaker Verification Using Adapted Bounded Gaussian Mixture Model",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2018/265901a300/12OmNxeutgm",
"parentPublication": {
"id": "proceedings/iri/2018/2659/0",
"title": "2018 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a746",
"title": "Maximum Likelihood Estimation of Gaussian Mixture Models Using Particle Swarm Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a746/12OmNyKJiir",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2011/09/ttk2011091406",
"title": "Laplacian Regularized Gaussian Mixture Model for Data Clustering",
"doi": null,
"abstractUrl": "/journal/tk/2011/09/ttk2011091406/13rRUxBa5xB",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2017/11/07938761",
"title": "A Fully-Pipelined Hardware Design for Gaussian Mixture Models",
"doi": null,
"abstractUrl": "/journal/tc/2017/11/07938761/13rRUxjQyoA",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0/08726734",
"title": "A Finite Multi-Dimensional Generalized Gamma Mixture Model",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2018/08726734/1axfoS4brAQ",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0",
"title": "2018 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csii-bcd/2017/3302/0/3302a125",
"title": "Intrusion Detection System Based on Gaussian Mixture Model Using Hadoop Framework",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csii-bcd/2017/3302a125/1cdOABXKFMs",
"parentPublication": {
"id": "proceedings/acit-csii-bcd/2017/3302/0",
"title": "2017 5th Intl Conf on Applied Computing and Information Technology/4th Intl Conf on Computational Science/Intelligence and Applied Informatics/2nd Intl Conf on Big Data, Cloud Computing, Data Science (ACIT-CSII-BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300g439",
"title": "Deep Clustering by Gaussian Mixture Variational Autoencoders With Graph Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300g439/1hVlkCHpXPi",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2020/1054/0/09191590",
"title": "Background Subtraction with a Hierarchical Pitman-Yor Process Mixture Model of Generalized Gaussian Distributions",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2020/09191590/1n0Iwo8Ifrq",
"parentPublication": {
"id": "proceedings/iri/2020/1054/0",
"title": "2020 IEEE 21st International Conference on Information Reuse and Integration for Data Science (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKiqL",
"title": "2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing, 16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)",
"acronym": "dasc-picom-datacom-cyberscitech",
"groupId": "1001364",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45Xh13wO",
"doi": "10.1109/DASC/PiCom/DataCom/CyberSciTec.2018.00-16",
"title": "Dance Posture/Steps Classification Using 3D Joints from the Kinect Sensors",
"normalizedTitle": "Dance Posture/Steps Classification Using 3D Joints from the Kinect Sensors",
"abstract": "In this paper, we introduce a new framework for find out the dance postures from dance sequences. The data exploited are from 3D point joints as being estimated by the Kinect-II sensor. The analysis, instead of other traditional methods, assigns the correct label to each frame of the Kinect-derived sequence with respect to the actual posture involved. On the contrary, the conventional dance classification methods categorize the dances to their styles, type of derived from tempo and music rhythm. Seven classifiers are tested to assess the performance in posture classification. The selected classifiers are either probabilistic or use linear or non-linear kernels such as the Support Vector Machines. We have also included ensemble classification schemes to improve the analysis. Experiments are given to real-world dances focusing on Greek folkloric ones. The results are objectively assessed under the precision and recall metrics and provide a comparative framework.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we introduce a new framework for find out the dance postures from dance sequences. The data exploited are from 3D point joints as being estimated by the Kinect-II sensor. The analysis, instead of other traditional methods, assigns the correct label to each frame of the Kinect-derived sequence with respect to the actual posture involved. On the contrary, the conventional dance classification methods categorize the dances to their styles, type of derived from tempo and music rhythm. Seven classifiers are tested to assess the performance in posture classification. The selected classifiers are either probabilistic or use linear or non-linear kernels such as the Support Vector Machines. We have also included ensemble classification schemes to improve the analysis. Experiments are given to real-world dances focusing on Greek folkloric ones. The results are objectively assessed under the precision and recall metrics and provide a comparative framework.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we introduce a new framework for find out the dance postures from dance sequences. The data exploited are from 3D point joints as being estimated by the Kinect-II sensor. The analysis, instead of other traditional methods, assigns the correct label to each frame of the Kinect-derived sequence with respect to the actual posture involved. On the contrary, the conventional dance classification methods categorize the dances to their styles, type of derived from tempo and music rhythm. Seven classifiers are tested to assess the performance in posture classification. The selected classifiers are either probabilistic or use linear or non-linear kernels such as the Support Vector Machines. We have also included ensemble classification schemes to improve the analysis. Experiments are given to real-world dances focusing on Greek folkloric ones. The results are objectively assessed under the precision and recall metrics and provide a comparative framework.",
"fno": "751800a868",
"keywords": [
"Image Classification",
"Image Sequences",
"Support Vector Machines",
"Dance Postures",
"Dance Sequences",
"3 D Point Joints",
"Kinect II Sensor",
"Correct Label",
"Kinect Derived Sequence",
"Actual Posture",
"Tempo Music Rhythm",
"Posture Classification",
"Nonlinear Kernels",
"Ensemble Classification Schemes",
"Real World Dances",
"3 D Joints",
"Kinect Sensors",
"Dance Posture Steps Classification",
"Sensors",
"Legged Locomotion",
"IP Networks",
"Three Dimensional Displays",
"Cultural Differences",
"Skeleton",
"Classification Algorithms",
"Posture Identification Classification Of Dance Postures Comparative Analysis Of Classifiers Dance Sequence From Point Joints Captured By The Kinect II Sensor"
],
"authors": [
{
"affiliation": null,
"fullName": "Nikolaos Bakalos",
"givenName": "Nikolaos",
"surname": "Bakalos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Eftychios Protopapadakis",
"givenName": "Eftychios",
"surname": "Protopapadakis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Anastasios Doulamis",
"givenName": "Anastasios",
"surname": "Doulamis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nikolaos Doulamis",
"givenName": "Nikolaos",
"surname": "Doulamis",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dasc-picom-datacom-cyberscitech",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-08-01T00:00:00",
"pubType": "proceedings",
"pages": "868-873",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7518-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "751800a862",
"articleId": "17D45W2Wyzn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "751800a874",
"articleId": "17D45WWzW7l",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/culture-computing/2013/5047/0/5047a074",
"title": "The Study of Taiwanese Indigenous Dance with Labanotation and an Application",
"doi": null,
"abstractUrl": "/proceedings-article/culture-computing/2013/5047a074/12OmNAY79cQ",
"parentPublication": {
"id": "proceedings/culture-computing/2013/5047/0",
"title": "2013 International Conference on Culture and Computing (Culture Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsiit/2017/9899/0/9899a279",
"title": "Fall Detection Application using Kinect",
"doi": null,
"abstractUrl": "/proceedings-article/icsiit/2017/9899a279/12OmNC3XhcM",
"parentPublication": {
"id": "proceedings/icsiit/2017/9899/0",
"title": "2017 International Conference on Soft Computing, Intelligent System and Information Technology (ICSIIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/jcdl/2014/5569/0/06970210",
"title": "Modeling abstractions for dance digital libraries",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2014/06970210/12OmNCxbXJA",
"parentPublication": {
"id": "proceedings/jcdl/2014/5569/0",
"title": "2014 IEEE/ACM Joint Conference on Digital Libraries (JCDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciip/2015/0148/0/07414774",
"title": "Bharatanatyam Adavu Recognition from Depth Data",
"doi": null,
"abstractUrl": "/proceedings-article/iciip/2015/07414774/12OmNwdtwk2",
"parentPublication": {
"id": "proceedings/iciip/2015/0148/0",
"title": "2015 Third International Conference on Image Information Processing (ICIIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ncvpripg/2011/4599/0/4599a041",
"title": "Annotating Dance Posture Images Using Multi Kernel Feature Combination",
"doi": null,
"abstractUrl": "/proceedings-article/ncvpripg/2011/4599a041/12OmNyqRnpl",
"parentPublication": {
"id": "proceedings/ncvpripg/2011/4599/0",
"title": "Computer Vision, Pattern Recognition, Image Processing and Graphics, National Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2018/7123/0/08493410",
"title": "An Embodied Learning Game Using Kinect and Labanotation for Analysis and Visualization of Dance Kinesiology",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2018/08493410/14tNJmS4WAw",
"parentPublication": {
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545078",
"title": "Kinematics-based Extraction of Salient 3D Human Motion Data for Summarization of Choreographic Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545078/17D45WODarm",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrhciai/2022/9182/0/918200a150",
"title": "A Skeleton Posture Transfer Method from Kinect Capture",
"doi": null,
"abstractUrl": "/proceedings-article/vrhciai/2022/918200a150/1Lxffiey7xm",
"parentPublication": {
"id": "proceedings/vrhciai/2022/9182/0",
"title": "2022 International Conference on Virtual Reality, Human-Computer Interaction and Artificial Intelligence (VRHCIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2019/4540/0/08864522",
"title": "Choreographic Pose Identification using Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2019/08864522/1e5ZrdQ8MQ8",
"parentPublication": {
"id": "proceedings/vs-games/2019/4540/0",
"title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a331",
"title": "Hello, Chinese Ethnic and Folk Dances",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a331/1vg7FfaT7O0",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1AjSCovc0wM",
"title": "2021 International Conference on Data Mining Workshops (ICDMW)",
"acronym": "icdmw",
"groupId": "1001620",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1AjSF9nqIeI",
"doi": "10.1109/ICDMW53433.2021.00118",
"title": "Mixture Gaussian Prototypes for Few-Shot Learning",
"normalizedTitle": "Mixture Gaussian Prototypes for Few-Shot Learning",
"abstract": "In this paper, we provide a new model named Mixture Gaussian Prototypes in few-shot classification problems. In order to describe their features more accurately, our mixture Gaussian prototypes use both the variance and the mean of the data in the support set, while the Gaussian distribution is concise so that it is suitable for the small sample task. Moreover, we verify the validity of our mixture Gaussian prototype through experiments. Also, we perform experiments on Omniglot, mini-ImageNet, and CUB for few-shot Classification and obtain high classification accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we provide a new model named Mixture Gaussian Prototypes in few-shot classification problems. In order to describe their features more accurately, our mixture Gaussian prototypes use both the variance and the mean of the data in the support set, while the Gaussian distribution is concise so that it is suitable for the small sample task. Moreover, we verify the validity of our mixture Gaussian prototype through experiments. Also, we perform experiments on Omniglot, mini-ImageNet, and CUB for few-shot Classification and obtain high classification accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we provide a new model named Mixture Gaussian Prototypes in few-shot classification problems. In order to describe their features more accurately, our mixture Gaussian prototypes use both the variance and the mean of the data in the support set, while the Gaussian distribution is concise so that it is suitable for the small sample task. Moreover, we verify the validity of our mixture Gaussian prototype through experiments. Also, we perform experiments on Omniglot, mini-ImageNet, and CUB for few-shot Classification and obtain high classification accuracy.",
"fno": "242700a902",
"keywords": [
"Conferences",
"Prototypes",
"Gaussian Distribution",
"Data Mining",
"Task Analysis",
"Few Shot Learning",
"Classification Problem",
"Mixture Gaussian Prototypes"
],
"authors": [
{
"affiliation": "Beijing University of Posts and Telecommunications,School of Computer Science,Beijing,China",
"fullName": "Ruijin Jiang",
"givenName": "Ruijin",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "China University of Geosciences,School of Geophysics and Information Technology,Beijing,China",
"fullName": "Zhaohui Cheng",
"givenName": "Zhaohui",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdmw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-12-01T00:00:00",
"pubType": "proceedings",
"pages": "902-908",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2427-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "242700a893",
"articleId": "1AjSP7vOr1m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "242700a909",
"articleId": "1AjSSCaigko",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdmw/2010/4257/0/4257a499",
"title": "A Privacy Preserving Framework for Gaussian Mixture Models",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2010/4257a499/12OmNvA1hmA",
"parentPublication": {
"id": "proceedings/icdmw/2010/4257/0",
"title": "2010 IEEE International Conference on Data Mining Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2017/3581/0/3581a704",
"title": "A Comparison Between Different Gaussian-Based Mixture Models",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2017/3581a704/12OmNwLOYWp",
"parentPublication": {
"id": "proceedings/aiccsa/2017/3581/0",
"title": "2017 IEEE/ACS 14th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2021/2398/0/239800a926",
"title": "Few-Shot Partial Multi-Label Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2021/239800a926/1Aqx1YNNFU4",
"parentPublication": {
"id": "proceedings/icdm/2021/2398/0",
"title": "2021 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500b040",
"title": "Few-shot Weakly-Supervised Object Detection via Directional Statistics",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500b040/1B13fXkZPFe",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200i641",
"title": "Synthesized Feature based Few-Shot Class-Incremental Learning on a Mixture of Subspaces",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200i641/1BmGMCrQSsg",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2021/0679/0/067900a086",
"title": "A Graph-Convolutional-Network based Prototype Mixing Model for Few-shot Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2021/067900a086/1CATlDUEjLO",
"parentPublication": {
"id": "proceedings/itme/2021/0679/0",
"title": "2021 11th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icrss/2022/6403/0/640300a053",
"title": "Mixture Loss Function-based Classification Network for Few-shot Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icrss/2022/640300a053/1M2Ml7PIkuY",
"parentPublication": {
"id": "proceedings/icrss/2022/6403/0",
"title": "2022 International Conference on Computing, Robotics and System Sciences (ICRSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102883",
"title": "Learning Class Prototypes Via Anisotropic Combination of Aligned Modalities for Few-Shot Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102883/1kwqY0xIt7G",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700c643",
"title": "Multimodal Prototypical Networks for Few-shot Learning",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700c643/1uqGPGp8ig8",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700c663",
"title": "RNNP: A Robust Few-Shot Learning Approach",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700c663/1uqGncNSrVC",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqJ8taQ",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"acronym": "vast",
"groupId": "1001630",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBkxsqV",
"doi": "10.1109/VAST.2014.7042552",
"title": "Event-based text visual analytics",
"normalizedTitle": "Event-based text visual analytics",
"abstract": "We present an event-based approach for solving a directed sensemaking task in which we combine powerful information foraging tools with intuitive synthesis spaces to solve the VAST Challenge 2014 Mini-Challenge 1. A combination of student-created and commercially available software are used to solve various aspects of the scenario. In addition to applying entity extraction and topic modelling, we enable the user to explore a large dataset using multi-model semantic interaction, which infers analytical reasoning from user actions to augment the data spatialization and determine what information should be presented and suggested to the user. Additionally, we visualize extracted topics using Tableau to construct a timeline of events surrounding the questions posed by the challenge.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an event-based approach for solving a directed sensemaking task in which we combine powerful information foraging tools with intuitive synthesis spaces to solve the VAST Challenge 2014 Mini-Challenge 1. A combination of student-created and commercially available software are used to solve various aspects of the scenario. In addition to applying entity extraction and topic modelling, we enable the user to explore a large dataset using multi-model semantic interaction, which infers analytical reasoning from user actions to augment the data spatialization and determine what information should be presented and suggested to the user. Additionally, we visualize extracted topics using Tableau to construct a timeline of events surrounding the questions posed by the challenge.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an event-based approach for solving a directed sensemaking task in which we combine powerful information foraging tools with intuitive synthesis spaces to solve the VAST Challenge 2014 Mini-Challenge 1. A combination of student-created and commercially available software are used to solve various aspects of the scenario. In addition to applying entity extraction and topic modelling, we enable the user to explore a large dataset using multi-model semantic interaction, which infers analytical reasoning from user actions to augment the data spatialization and determine what information should be presented and suggested to the user. Additionally, we visualize extracted topics using Tableau to construct a timeline of events surrounding the questions posed by the challenge.",
"fno": "07042552",
"keywords": [
"Event Extraction",
"Sensemaking",
"Topic Modelling",
"Semantic Interaction"
],
"authors": [
{
"affiliation": "Virginia Tech",
"fullName": "Ji Wang",
"givenName": "Ji",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech",
"fullName": "Lauren Bradel",
"givenName": "Lauren",
"surname": "Bradel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech",
"fullName": "Chris North",
"givenName": "Chris",
"surname": "North",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vast",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-10-01T00:00:00",
"pubType": "proceedings",
"pages": "333-334",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6227-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07042551",
"articleId": "12OmNBqMDxG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07042553",
"articleId": "12OmNxwENmn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2014/6227/0/07042492",
"title": "Multi-model semantic interaction for text analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042492/12OmNBkP3EP",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2007/1659/0/04389011",
"title": "Design Considerations for Collaborative Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2007/04389011/12OmNvqW6XW",
"parentPublication": {
"id": "proceedings/vast/2007/1659/0",
"title": "2007 IEEE Symposium on Visual Analytics Science and Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2015/9783/0/07347625",
"title": "Mixed-initiative visual analytics using task-driven recommendations",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2015/07347625/12OmNwc3wtQ",
"parentPublication": {
"id": "proceedings/vast/2015/9783/0",
"title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2015/7343/0/07314287",
"title": "Big Text Visual Analytics in Sensemaking",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2015/07314287/12OmNylKAXL",
"parentPublication": {
"id": "proceedings/bdva/2015/7343/0",
"title": "2015 Big Data Visual Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122879",
"title": "Semantic Interaction for Sensemaking: Inferring Analytical Reasoning for Model Steering",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122879/13rRUwdIOUL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061033",
"title": "Comparing Dot and Landscape Spatializations for Visual Memory Differences",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061033/13rRUxAASSZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585564",
"title": "Interactive Visual Analytics Application for Spatiotemporal Movement Data VAST Challenge 2017 Mini-Challenge 1: Award for Actionable and Detailed Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585564/17D45VsBU7R",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2018/9194/0/08534027",
"title": "Visual Analytics on Large Displays: Exploring User Spatialization and How Size and Resolution Affect Task Performance",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2018/08534027/17D45XvMcbt",
"parentPublication": {
"id": "proceedings/bdva/2018/9194/0",
"title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2010/9488/0/05651204",
"title": "VisWorks text and network visual analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2010/05651204/1eof34V7mPS",
"parentPublication": {
"id": "proceedings/vast/2010/9488/0",
"title": "2010 IEEE Symposium on Visual Analytics Science and Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a081",
"title": "Designing for Ambiguity: Visual Analytics in Avalanche Forecasting",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a081/1qROrjfqgSI",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAolH1t",
"title": "2016 IEEE 14th Intl Conf on Dependable, Autonomic and Secure Computing, 14th Intl Conf on Pervasive Intelligence and Computing, 2nd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)",
"acronym": "dasc-picom-datacom-cyberscitech",
"groupId": "1001364",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrEL2BM",
"doi": "10.1109/DASC-PICom-DataCom-CyberSciTec.2016.122",
"title": "Online Distance Measurement for Tree Data Event Streams",
"normalizedTitle": "Online Distance Measurement for Tree Data Event Streams",
"abstract": "Distances for trees have already been researched for decades. Thus most of the research is focused on static tree data. E.g. the calculation of XML documents when considering the aggregation of different data sources. Nowadays rising amounts of data require analysis on data streams. This results in the analysis of dynamic trees. Analysis on data streams require one-pass algorithms as well as efficient resource utilisation regarding memory and CPU. Our use case is the network traffic monitoring and analysis of batch jobs in a batch system. The monitoring data is delivered via a monitoring event stream per batch job. Each batch job implements specific workflows that build up the dynamic tree structure. To compare the batch jobs efficiently against each other over time, we utilise an incremental distance measurement for dynamic trees. This paper presents the incremental distance measurement. It utilises a sketching of trees using signatures. Furthermore we rely on hash tables to realise fast access and calculations. We analyse the properties of our incremental distance measurement and compare it to the well-known tree edit distance. All experiments are performed on real world data and confirm the feasibility and scalability of our approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Distances for trees have already been researched for decades. Thus most of the research is focused on static tree data. E.g. the calculation of XML documents when considering the aggregation of different data sources. Nowadays rising amounts of data require analysis on data streams. This results in the analysis of dynamic trees. Analysis on data streams require one-pass algorithms as well as efficient resource utilisation regarding memory and CPU. Our use case is the network traffic monitoring and analysis of batch jobs in a batch system. The monitoring data is delivered via a monitoring event stream per batch job. Each batch job implements specific workflows that build up the dynamic tree structure. To compare the batch jobs efficiently against each other over time, we utilise an incremental distance measurement for dynamic trees. This paper presents the incremental distance measurement. It utilises a sketching of trees using signatures. Furthermore we rely on hash tables to realise fast access and calculations. We analyse the properties of our incremental distance measurement and compare it to the well-known tree edit distance. All experiments are performed on real world data and confirm the feasibility and scalability of our approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Distances for trees have already been researched for decades. Thus most of the research is focused on static tree data. E.g. the calculation of XML documents when considering the aggregation of different data sources. Nowadays rising amounts of data require analysis on data streams. This results in the analysis of dynamic trees. Analysis on data streams require one-pass algorithms as well as efficient resource utilisation regarding memory and CPU. Our use case is the network traffic monitoring and analysis of batch jobs in a batch system. The monitoring data is delivered via a monitoring event stream per batch job. Each batch job implements specific workflows that build up the dynamic tree structure. To compare the batch jobs efficiently against each other over time, we utilise an incremental distance measurement for dynamic trees. This paper presents the incremental distance measurement. It utilises a sketching of trees using signatures. Furthermore we rely on hash tables to realise fast access and calculations. We analyse the properties of our incremental distance measurement and compare it to the well-known tree edit distance. All experiments are performed on real world data and confirm the feasibility and scalability of our approach.",
"fno": "07588920",
"keywords": [
"Monitoring",
"Distance Measurement",
"XML",
"Scalability",
"Electronic Mail",
"Heuristic Algorithms",
"Resource Management"
],
"authors": [
{
"affiliation": null,
"fullName": "Eileen Kuehn",
"givenName": "Eileen",
"surname": "Kuehn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Achim Streit",
"givenName": "Achim",
"surname": "Streit",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dasc-picom-datacom-cyberscitech",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-08-01T00:00:00",
"pubType": "proceedings",
"pages": "681-688",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-4065-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07588919",
"articleId": "12OmNyen1qZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07588921",
"articleId": "12OmNC943R3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sbec/2016/2132/0/07458964",
"title": "Assessment of Distance Measurement with Selected Wearable Devices in Telemonitoring",
"doi": null,
"abstractUrl": "/proceedings-article/sbec/2016/07458964/12OmNrAdsCa",
"parentPublication": {
"id": "proceedings/sbec/2016/2132/0",
"title": "2016 32nd Southern Biomedical Engineering Conference (SBEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/2/3962c789",
"title": "Novel Way of Scalar Miss Distance Measurement",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962c789/12OmNrkT7wA",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/2",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2012/0430/0/06386344",
"title": "Distance measurement for tower crane obstacle based on multi-ultrasonic sensors",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386344/12OmNwF0BSU",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/saint/2010/4107/0/4107a209",
"title": "ManeuverXML: Distance-Measurement Based Operation Event Description Model and User Interaction Interpretation",
"doi": null,
"abstractUrl": "/proceedings-article/saint/2010/4107a209/12OmNx0A7Or",
"parentPublication": {
"id": "proceedings/saint/2010/4107/0",
"title": "2010 10th IEEE/IPSJ International Symposium on Applications and the Internet",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a237",
"title": "Incremental Distance Transforms (IDT)",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a237/12OmNxFaLv6",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mue/2009/3658/0/3658a137",
"title": "Person to Camera Distance Measurement Based on Eye-Distance",
"doi": null,
"abstractUrl": "/proceedings-article/mue/2009/3658a137/12OmNz6iO6W",
"parentPublication": {
"id": "proceedings/mue/2009/3658/0",
"title": "Multimedia and Ubiquitous Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2003/06/k1561",
"title": "Adaptive and Incremental Processing for Distance Join Queries",
"doi": null,
"abstractUrl": "/journal/tk/2003/06/k1561/13rRUEgarnZ",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1979/02/06786615",
"title": "A Tree-to-Tree Distance and Its Application to Cluster Analysis",
"doi": null,
"abstractUrl": "/journal/tp/1979/02/06786615/13rRUxlgy4u",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/focs/2022/2055/0/205500a792",
"title": "Breaking the Cubic Barrier for (Unweighted) Tree Edit Distance",
"doi": null,
"abstractUrl": "/proceedings-article/focs/2022/205500a792/1BtfEPfypLW",
"parentPublication": {
"id": "proceedings/focs/2022/2055/0",
"title": "2021 IEEE 62nd Annual Symposium on Foundations of Computer Science (FOCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2022/1008/0/10017876",
"title": "STREamRHF: Tree-Based Unsupervised Anomaly Detection for Data Streams",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2022/10017876/1KJxuvEdL4k",
"parentPublication": {
"id": "proceedings/aiccsa/2022/1008/0",
"title": "2022 IEEE/ACS 19th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNz5JC3w",
"title": "2014 IEEE International Conference on Big Data (Big Data)",
"acronym": "big-data",
"groupId": "1802964",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzahcdT",
"doi": "10.1109/BigData.2014.7004253",
"title": "Topic similarity networks: Visual analytics for large document sets",
"normalizedTitle": "Topic similarity networks: Visual analytics for large document sets",
"abstract": "We investigate ways in which to improve the interpretability of LDA topic models by better analyzing and visualizing their outputs. We focus on examining what we refer to as topic similarity networks: graphs in which nodes represent latent topics in text collections and links represent similarity among topics. We describe efficient and effective approaches to both building and labeling such networks. Visualizations of topic models based on these networks are shown to be a powerful means of exploring, characterizing, and summarizing large collections of unstructured text documents. They help to “tease out” non-obvious connections among different sets of documents and provide insights into how topics form larger themes. We demonstrate the efficacy and practicality of these approaches through two case studies: 1) NSF grants for basic research spanning a 14 year period and 2) the entire English portion of Wikipedia.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We investigate ways in which to improve the interpretability of LDA topic models by better analyzing and visualizing their outputs. We focus on examining what we refer to as topic similarity networks: graphs in which nodes represent latent topics in text collections and links represent similarity among topics. We describe efficient and effective approaches to both building and labeling such networks. Visualizations of topic models based on these networks are shown to be a powerful means of exploring, characterizing, and summarizing large collections of unstructured text documents. They help to “tease out” non-obvious connections among different sets of documents and provide insights into how topics form larger themes. We demonstrate the efficacy and practicality of these approaches through two case studies: 1) NSF grants for basic research spanning a 14 year period and 2) the entire English portion of Wikipedia.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We investigate ways in which to improve the interpretability of LDA topic models by better analyzing and visualizing their outputs. We focus on examining what we refer to as topic similarity networks: graphs in which nodes represent latent topics in text collections and links represent similarity among topics. We describe efficient and effective approaches to both building and labeling such networks. Visualizations of topic models based on these networks are shown to be a powerful means of exploring, characterizing, and summarizing large collections of unstructured text documents. They help to “tease out” non-obvious connections among different sets of documents and provide insights into how topics form larger themes. We demonstrate the efficacy and practicality of these approaches through two case studies: 1) NSF grants for basic research spanning a 14 year period and 2) the entire English portion of Wikipedia.",
"fno": "07004253",
"keywords": [
"Labeling",
"Data Visualization",
"Communities",
"Proteins",
"Visualization",
"Probability Distribution",
"Big Data"
],
"authors": [
{
"affiliation": "Institute for Defense Analyses, Alexandria, VA 22311",
"fullName": "Arun S. Maiya",
"givenName": "Arun S.",
"surname": "Maiya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute for Defense Analyses, Alexandria, VA 22311",
"fullName": "Robert M. Rolfe",
"givenName": "Robert M.",
"surname": "Rolfe",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "big-data",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-10-01T00:00:00",
"pubType": "proceedings",
"pages": "364-372",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-5666-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07004252",
"articleId": "12OmNC1Y5m5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07004254",
"articleId": "12OmNAsBFNH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ialp/2010/4288/0/4288a195",
"title": "Topic-Driven Multi-document Summarization",
"doi": null,
"abstractUrl": "/proceedings-article/ialp/2010/4288a195/12OmNBSjJ6X",
"parentPublication": {
"id": "proceedings/ialp/2010/4288/0",
"title": "Asian Language Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2009/3733/0/3733a149",
"title": "Topic-Based Coordination for Visual Analysis of Evolving Document Collections",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2009/3733a149/12OmNBhpS9v",
"parentPublication": {
"id": "proceedings/iv/2009/3733/0",
"title": "2009 13th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2016/9005/0/07840592",
"title": "Pairwise topic model and its application to topic transition and evolution",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2016/07840592/12OmNrAMEJi",
"parentPublication": {
"id": "proceedings/big-data/2016/9005/0",
"title": "2016 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2014/4302/0/4302a803",
"title": "Topic Models with Topic Ordering Regularities for Topic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2014/4302a803/12OmNvEyRbL",
"parentPublication": {
"id": "proceedings/icdm/2014/4302/0",
"title": "2014 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wkdd/2010/5397/0/05432741",
"title": "Topic Extraction for a Large Document Set with the Topic Integration",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2010/05432741/12OmNwDSdqU",
"parentPublication": {
"id": "proceedings/wkdd/2010/5397/0",
"title": "2010 3rd International Conference on Knowledge Discovery and Data Mining (WKDD 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890165",
"title": "Cross media topic analytics based on synergetic content and user behavior modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890165/12OmNwEJ0HA",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/jcdl/2014/5569/0/06970174",
"title": "Representing topics labels for exploring digital libraries",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2014/06970174/12OmNx0RIRl",
"parentPublication": {
"id": "proceedings/jcdl/2014/5569/0",
"title": "2014 IEEE/ACM Joint Conference on Digital Libraries (JCDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2011/4408/0/4408a101",
"title": "SolarMap: Multifaceted Visual Analytics for Topic Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2011/4408a101/12OmNzw8j1t",
"parentPublication": {
"id": "proceedings/icdm/2011/4408/0",
"title": "2011 IEEE 11th International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecnlpir/2022/7382/0/738200a084",
"title": "Multilingual Document Concept Topic Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/ecnlpir/2022/738200a084/1KMQ5U38nXG",
"parentPublication": {
"id": "proceedings/ecnlpir/2022/7382/0",
"title": "2022 European Conference on Natural Language Processing and Information Retrieval (ECNLPIR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a148",
"title": "An Interactive Visual Analytics System for Incremental Classification Based on Semi-supervised Topic Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a148/1cMF8cnyXfi",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzTH0FY",
"title": "2011 IEEE 11th International Conference on Data Mining",
"acronym": "icdm",
"groupId": "1000179",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzw8j1t",
"doi": "10.1109/ICDM.2011.135",
"title": "SolarMap: Multifaceted Visual Analytics for Topic Exploration",
"normalizedTitle": "SolarMap: Multifaceted Visual Analytics for Topic Exploration",
"abstract": "Documents in rich text corpora often contain multiple facets of information. For example, an article from a medical document collection might consist of multifaceted information about symptoms, treatments, causes, diagnoses, prognoses, and preventions. Thus, documents in the collection may have different relations across each of these various facets. Topic analysis and exploration for such multi-relational corpora is a challenging visual analytic task. This paper presents Solar Map, a multifaceted visual analytic technique for visually exploring topics in multi-relational data. Solar Map simultaneously visualizes the topic distribution of the underlying entities from one facet together with keyword distributions that convey the semantic definition of each cluster along a secondary facet. Solar Map combines several visual techniques including 1) topic contour clusters and interactive multifaceted keyword topic rings, 2) a global layout optimization algorithm that aligns each topic cluster with its corresponding keywords, and 3) 2) an optimal temporal network segmentation and layout method that renders temporal evolution of clusters. Finally, the paper concludes with two case studies and quantitative user evaluation which show the power of the Solar Map technique.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Documents in rich text corpora often contain multiple facets of information. For example, an article from a medical document collection might consist of multifaceted information about symptoms, treatments, causes, diagnoses, prognoses, and preventions. Thus, documents in the collection may have different relations across each of these various facets. Topic analysis and exploration for such multi-relational corpora is a challenging visual analytic task. This paper presents Solar Map, a multifaceted visual analytic technique for visually exploring topics in multi-relational data. Solar Map simultaneously visualizes the topic distribution of the underlying entities from one facet together with keyword distributions that convey the semantic definition of each cluster along a secondary facet. Solar Map combines several visual techniques including 1) topic contour clusters and interactive multifaceted keyword topic rings, 2) a global layout optimization algorithm that aligns each topic cluster with its corresponding keywords, and 3) 2) an optimal temporal network segmentation and layout method that renders temporal evolution of clusters. Finally, the paper concludes with two case studies and quantitative user evaluation which show the power of the Solar Map technique.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Documents in rich text corpora often contain multiple facets of information. For example, an article from a medical document collection might consist of multifaceted information about symptoms, treatments, causes, diagnoses, prognoses, and preventions. Thus, documents in the collection may have different relations across each of these various facets. Topic analysis and exploration for such multi-relational corpora is a challenging visual analytic task. This paper presents Solar Map, a multifaceted visual analytic technique for visually exploring topics in multi-relational data. Solar Map simultaneously visualizes the topic distribution of the underlying entities from one facet together with keyword distributions that convey the semantic definition of each cluster along a secondary facet. Solar Map combines several visual techniques including 1) topic contour clusters and interactive multifaceted keyword topic rings, 2) a global layout optimization algorithm that aligns each topic cluster with its corresponding keywords, and 3) 2) an optimal temporal network segmentation and layout method that renders temporal evolution of clusters. Finally, the paper concludes with two case studies and quantitative user evaluation which show the power of the Solar Map technique.",
"fno": "4408a101",
"keywords": [
"Visual Analytics",
"Multifaceted Information Visualization",
"Temporal Topic Visualization"
],
"authors": [
{
"affiliation": null,
"fullName": "Nan Cao",
"givenName": "Nan",
"surname": "Cao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "David Gotz",
"givenName": "David",
"surname": "Gotz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jimeng Sun",
"givenName": "Jimeng",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yu-Ru Lin",
"givenName": "Yu-Ru",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Huamin Qu",
"givenName": "Huamin",
"surname": "Qu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-12-01T00:00:00",
"pubType": "proceedings",
"pages": "101-110",
"year": "2011",
"issn": "1550-4786",
"isbn": "978-0-7695-4408-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4408a091",
"articleId": "12OmNAkWvn2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4408a111",
"articleId": "12OmNwEJ0S0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2009/3733/0/3733a149",
"title": "Topic-Based Coordination for Visual Analysis of Evolving Document Collections",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2009/3733a149/12OmNBhpS9v",
"parentPublication": {
"id": "proceedings/iv/2009/3733/0",
"title": "2009 13th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061172",
"title": "FacetAtlas: Multifaceted Visualization for Rich Text Corpora",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061172/13rRUEgs2BR",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2009/02/mcg2009020014",
"title": "Defining Insight for Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2009/02/mcg2009020014/13rRUwh80JN",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/07/mco2013070022",
"title": "Visual Analytics Infrastructures: From Data Management to Exploration",
"doi": null,
"abstractUrl": "/magazine/co/2013/07/mco2013070022/13rRUx0gelz",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2014/05/mcg2014050052",
"title": "A Visual-Analytics System for Railway Safety Management",
"doi": null,
"abstractUrl": "/magazine/cg/2014/05/mcg2014050052/13rRUxCRFQk",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122899",
"title": "A Visual Analytics Approach to Multiscale Exploration of Environmental Time Series",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122899/13rRUxDqS8g",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876049",
"title": "Progressive Visual Analytics: User-Driven Visual Exploration of In-Progress Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876049/13rRUyogGAd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585638",
"title": "E-Map: A Visual Analytics Approach for Exploring Significant Event Evolutions in Social Media",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585638/17D45WrVg7l",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904482",
"title": "OBTracker: Visual Analytics of Off-ball Movements in Basketball",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904482/1H0GhtN7zkA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2021/3931/0/393100a206",
"title": "KeywordMap: Attention-based Visual Exploration for Keyword Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2021/393100a206/1tTtpeWwWuQ",
"parentPublication": {
"id": "proceedings/pacificvis/2021/3931/0",
"title": "2021 IEEE 14th Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cpqjBXCukg",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"acronym": "pacificvis",
"groupId": "1001657",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cMF7meccAo",
"doi": "10.1109/PacificVis.2019.00027",
"title": "Visual Analytics of Taxi Trajectory Data via Topical Sub-trajectories",
"normalizedTitle": "Visual Analytics of Taxi Trajectory Data via Topical Sub-trajectories",
"abstract": "GPS-based taxi trajectories contain valuable knowledge about movement behaviors for transportation and urban planning. Topic modeling is an effective tool to extract semantic information from taxi trajectories. However, previous methods generally ignore the direction of trajectories. In this paper, we employ the bigram topic model instead of traditional topic models to analyze textualized trajectories to take into account the direction information of trajectories. We further propose a modified Apriori algorithm to extract frequent sub-trajectories and use them to represent each topic as topical sub-trajectories. Finally, we design a visual analytics system with several linked views to facilitate users to interactively explore topics, sub-trajectories, and trips. We demonstrate the effectiveness of our system via case studies with Chengdu taxi trajectory data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "GPS-based taxi trajectories contain valuable knowledge about movement behaviors for transportation and urban planning. Topic modeling is an effective tool to extract semantic information from taxi trajectories. However, previous methods generally ignore the direction of trajectories. In this paper, we employ the bigram topic model instead of traditional topic models to analyze textualized trajectories to take into account the direction information of trajectories. We further propose a modified Apriori algorithm to extract frequent sub-trajectories and use them to represent each topic as topical sub-trajectories. Finally, we design a visual analytics system with several linked views to facilitate users to interactively explore topics, sub-trajectories, and trips. We demonstrate the effectiveness of our system via case studies with Chengdu taxi trajectory data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "GPS-based taxi trajectories contain valuable knowledge about movement behaviors for transportation and urban planning. Topic modeling is an effective tool to extract semantic information from taxi trajectories. However, previous methods generally ignore the direction of trajectories. In this paper, we employ the bigram topic model instead of traditional topic models to analyze textualized trajectories to take into account the direction information of trajectories. We further propose a modified Apriori algorithm to extract frequent sub-trajectories and use them to represent each topic as topical sub-trajectories. Finally, we design a visual analytics system with several linked views to facilitate users to interactively explore topics, sub-trajectories, and trips. We demonstrate the effectiveness of our system via case studies with Chengdu taxi trajectory data.",
"fno": "922600a174",
"keywords": [
"Data Analysis",
"Data Visualisation",
"Global Positioning System",
"Text Analysis",
"Town And Country Planning",
"Traffic Engineering Computing",
"GPS Based Taxi Trajectories",
"Semantic Information",
"Bigram Topic Model",
"Textualized Trajectories",
"Visual Analytics System",
"Chengdu Taxi Trajectory Data",
"Transportation",
"Urban Planning",
"Apriori Algorithm",
"Trajectory",
"Public Transportation",
"Roads",
"Data Mining",
"Data Visualization",
"Visual Analytics",
"Semantics",
"Topic Modeling",
"Visual Analytics",
"Taxi Trajectory"
],
"authors": [
{
"affiliation": "Zhejiang University",
"fullName": "Sichen Jin",
"givenName": "Sichen",
"surname": "Jin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University",
"fullName": "Yubo Tao",
"givenName": "Yubo",
"surname": "Tao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University",
"fullName": "Yuyu Yan",
"givenName": "Yuyu",
"surname": "Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University",
"fullName": "Jin Xu",
"givenName": "Jin",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University",
"fullName": "Hai Lin",
"givenName": "Hai",
"surname": "Lin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pacificvis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-04-01T00:00:00",
"pubType": "proceedings",
"pages": "174-178",
"year": "2019",
"issn": null,
"isbn": "978-1-5386-9226-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "922600a202",
"articleId": "1dlwrj2iD60",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "922600a179",
"articleId": "1cMF6JrG9JS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/big-data/2015/9926/0/07364113",
"title": "Taxi trip time prediction using similar trips and road network data",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07364113/12OmNrMZpzH",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icss/2015/9947/0/9947a099",
"title": "A Comprehensive Survey of Recommendation System Based on Taxi GPS Trajectory",
"doi": null,
"abstractUrl": "/proceedings-article/icss/2015/9947a099/12OmNxR5UHE",
"parentPublication": {
"id": "proceedings/icss/2015/9947/0",
"title": "2015 International Conference on Service Science (ICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2015/8493/0/8493a407",
"title": "Estimating Taxi Demand-Supply Level Using Taxi Trajectory Data Stream",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2015/8493a407/12OmNxaNGil",
"parentPublication": {
"id": "proceedings/icdmw/2015/8493/0",
"title": "2015 IEEE International Conference on Data Mining Workshop (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2016/9005/0/07840904",
"title": "Big data computation of taxi movement in New York City",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2016/07840904/12OmNxwncfV",
"parentPublication": {
"id": "proceedings/big-data/2016/9005/0",
"title": "2016 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192687",
"title": "TrajGraph: A Graph-Based Visual Analytics Approach to Studying Urban Network Centralities Using Taxi Trajectory Data",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192687/13rRUwInvBa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07534822",
"title": "SemanticTraj: A New Approach to Interacting with Massive Taxi Trajectories",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07534822/13rRUygT7sI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2020/01/08492462",
"title": "Optimizing Taxi Driver Profit Efficiency: A Spatial Network-Based Markov Decision Process Approach",
"doi": null,
"abstractUrl": "/journal/bd/2020/01/08492462/14qdcQzyWVN",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08257968",
"title": "Detecting unmetered taxi rides from trajectory data",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08257968/17D45WODapu",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2020/2903/0/09101623",
"title": "Mobility-Aware Dynamic Taxi Ridesharing",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2020/09101623/1kaMziiyYz6",
"parentPublication": {
"id": "proceedings/icde/2020/2903/0",
"title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a378",
"title": "RoseTrajVis: Visual Analytics of Trajectories with Rose Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a378/1rSRa9dXxDO",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cYi06q10li",
"title": "2019 IEEE 19th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cYi3viyECY",
"doi": "10.1109/ICALT.2019.00058",
"title": "Analysing Social Presence in Online Discussions Through Network and Text Analytics",
"normalizedTitle": "Analysing Social Presence in Online Discussions Through Network and Text Analytics",
"abstract": "This paper presents an approach to studying relationships between students' social presence and course topics from transcripts of asynchronous discussions in online learning environments. Specifically, the paper uses topic modeling and epistemic network analysis to investigate how students' social presence is expressed across different course topics. Finally, we show how this method can be adopted to examine how students' social presence changed due to an instructional intervention. The results of this study and its implications are further discussed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an approach to studying relationships between students' social presence and course topics from transcripts of asynchronous discussions in online learning environments. Specifically, the paper uses topic modeling and epistemic network analysis to investigate how students' social presence is expressed across different course topics. Finally, we show how this method can be adopted to examine how students' social presence changed due to an instructional intervention. The results of this study and its implications are further discussed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an approach to studying relationships between students' social presence and course topics from transcripts of asynchronous discussions in online learning environments. Specifically, the paper uses topic modeling and epistemic network analysis to investigate how students' social presence is expressed across different course topics. Finally, we show how this method can be adopted to examine how students' social presence changed due to an instructional intervention. The results of this study and its implications are further discussed.",
"fno": "348500a163",
"keywords": [
"Computer Aided Instruction",
"Educational Courses",
"Text Analysis",
"Online Discussions",
"Students",
"Asynchronous Discussions",
"Online Learning Environments",
"Topic Modeling",
"Epistemic Network Analysis",
"Different Course Topics",
"Social Presence",
"Education",
"Encoding",
"Analytical Models",
"Australia",
"Natural Language Processing",
"Tools",
"Message Systems",
"Social Presence",
"Community Of Inquiry",
"Epistemic Network Analysis",
"Online Discussions",
"Distance Education",
"Topic Modelling",
"Natural Language Processing"
],
"authors": [
{
"affiliation": "Universidade Federal de Pernambuco",
"fullName": "Vitor Rolim",
"givenName": "Vitor",
"surname": "Rolim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidade Federal Rural de Pernambuco",
"fullName": "Rafael Ferreira Leite de Mello",
"givenName": "Rafael",
"surname": "Ferreira Leite de Mello",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Vitomir Kovanovic",
"givenName": "Vitomir",
"surname": "Kovanovic",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Information Technology",
"fullName": "Dragan Gaševic",
"givenName": "Dragan",
"surname": "Gaševic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-07-01T00:00:00",
"pubType": "proceedings",
"pages": "163-167",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3485-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "348500a158",
"articleId": "1cYi2u2hrhu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "348500a168",
"articleId": "1cYi10OvUA0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2007/2916/0/29160701",
"title": "Social presence and student perceptions in the blend of synchronous and asynchronous communication forms",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2007/29160701/12OmNAkniXF",
"parentPublication": {
"id": "proceedings/icalt/2007/2916/0",
"title": "2007 International Conference on Advanced Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/latice/2014/3592/0/3592a289",
"title": "Students' Social Presence in Online Learning System",
"doi": null,
"abstractUrl": "/proceedings-article/latice/2014/3592a289/12OmNqOffAy",
"parentPublication": {
"id": "proceedings/latice/2014/3592/0",
"title": "2014 International Conference on Teaching and Learning in Computing and Engineering (LaTiCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2006/2586/0/25860143",
"title": "Comparing a text- and visual-based interface presenting social information in an online environment",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2006/25860143/12OmNxRF6Wo",
"parentPublication": {
"id": "proceedings/vlhcc/2006/2586/0",
"title": "IEEE Symposium on Visual Languages and Human-Centric Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2008/3503/0/3503a468",
"title": "Mining Unstructured Text at Gigabyte per Second Speeds",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2008/3503a468/12OmNzUPpv6",
"parentPublication": {
"id": "proceedings/icdmw/2008/3503/0",
"title": "2008 IEEE International Conference on Data Mining Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2008/3167/0/3167a399",
"title": "Enhancing a Free-Text Adaptive Computer Assisted Assessment System with Self-Assessment Features",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2008/3167a399/12OmNzdoMvP",
"parentPublication": {
"id": "proceedings/icalt/2008/3167/0",
"title": "IEEE International Conference on Advanced Learning Technologies (ICALT 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2018/7447/0/744701a366",
"title": "Practicing the Scholarship of Teaching and Learning with Classroom Learning Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2018/744701a366/19m3DbGndJu",
"parentPublication": {
"id": "proceedings/iiai-aai/2018/7447/0",
"title": "2018 7th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2019/3485/0/348500a158",
"title": "Academic Performance Analysis Supported by a Web-Based Visual Analytics Tool",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2019/348500a158/1cYi2u2hrhu",
"parentPublication": {
"id": "proceedings/icalt/2019/3485/2161-377X",
"title": "2019 IEEE 19th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047433",
"title": "Mining Health Discussions on Suomi24",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047433/1iC6qRerROE",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0",
"title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2019/6303/0/09162369",
"title": "Analysing Computer Science Course Using Learning Analytics Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2019/09162369/1m6hOIlbZn2",
"parentPublication": {
"id": "proceedings/csde/2019/6303/0",
"title": "2019 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsecs-icocsim/2021/1407/0/140700a136",
"title": "Analysing The Impact of Social Presence on Student Satisfaction Through Small Group Discussion in A Synchronous Online Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icsecs-icocsim/2021/140700a136/1wYls0LFTkA",
"parentPublication": {
"id": "proceedings/icsecs-icocsim/2021/1407/0",
"title": "2021 International Conference on Software Engineering & Computer Systems and 4th International Conference on Computational Science and Information Management (ICSECS-ICOCSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCbCrVT",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAtaRZ2",
"doi": "10.1109/CVPR.2014.67",
"title": "Fast, Approximate Piecewise-Planar Modeling Based on Sparse Structure-from-Motion and Superpixels",
"normalizedTitle": "Fast, Approximate Piecewise-Planar Modeling Based on Sparse Structure-from-Motion and Superpixels",
"abstract": "State-of-the-art Multi-View Stereo (MVS) algorithms deliver dense depth maps or complex meshes with very high detail, and redundancy over regular surfaces. In turn, our interest lies in an approximate, but light-weight method that is better to consider for large-scale applications, such as urban scene reconstruction from ground-based images. We present a novel approach for producing dense reconstructions from multiple images and from the underlying sparse Structure-from-Motion (SfM) data in an efficient way. To overcome the problem of SfM sparsity and textureless areas, we assume piecewise planarity of man-made scenes and exploit both sparse visibility and a fast over-segmentation of the images. Reconstruction is formulated as an energy-driven, multi-view plane assignment problem, which we solve jointly over superpixels from all views while avoiding expensive photoconsistency computations. The resulting planar primitives -- defined by detailed superpixel boundaries -- are computed in about 10 seconds per image.",
"abstracts": [
{
"abstractType": "Regular",
"content": "State-of-the-art Multi-View Stereo (MVS) algorithms deliver dense depth maps or complex meshes with very high detail, and redundancy over regular surfaces. In turn, our interest lies in an approximate, but light-weight method that is better to consider for large-scale applications, such as urban scene reconstruction from ground-based images. We present a novel approach for producing dense reconstructions from multiple images and from the underlying sparse Structure-from-Motion (SfM) data in an efficient way. To overcome the problem of SfM sparsity and textureless areas, we assume piecewise planarity of man-made scenes and exploit both sparse visibility and a fast over-segmentation of the images. Reconstruction is formulated as an energy-driven, multi-view plane assignment problem, which we solve jointly over superpixels from all views while avoiding expensive photoconsistency computations. The resulting planar primitives -- defined by detailed superpixel boundaries -- are computed in about 10 seconds per image.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "State-of-the-art Multi-View Stereo (MVS) algorithms deliver dense depth maps or complex meshes with very high detail, and redundancy over regular surfaces. In turn, our interest lies in an approximate, but light-weight method that is better to consider for large-scale applications, such as urban scene reconstruction from ground-based images. We present a novel approach for producing dense reconstructions from multiple images and from the underlying sparse Structure-from-Motion (SfM) data in an efficient way. To overcome the problem of SfM sparsity and textureless areas, we assume piecewise planarity of man-made scenes and exploit both sparse visibility and a fast over-segmentation of the images. Reconstruction is formulated as an energy-driven, multi-view plane assignment problem, which we solve jointly over superpixels from all views while avoiding expensive photoconsistency computations. The resulting planar primitives -- defined by detailed superpixel boundaries -- are computed in about 10 seconds per image.",
"fno": "5118a469",
"keywords": [
"Image Reconstruction",
"Image Segmentation",
"Stereo Image Processing",
"Piecewise Planar Modeling",
"Multi View Stereo Algorithms",
"MVS Algorithms",
"Dense Depth Maps",
"Light Weight Method",
"Urban Scene Reconstruction",
"Ground Based Images",
"Dense Reconstructions",
"Sparse Structure From Motion Data",
"Sf M Data",
"Sf M Sparsity",
"Textureless Areas",
"Piecewise Planarity",
"Man Made Scenes",
"Fast Image Over Segmentation",
"Image Reconstruction",
"Superpixel Boundary",
"Three Dimensional Displays",
"Image Reconstruction",
"Image Color Analysis",
"Image Segmentation",
"Surface Reconstruction",
"Robustness",
"Optimization",
"Multi View",
"Superpixels",
"Structure From Motion",
"Sf M",
"Reconstruction",
"Piecewise Planar",
"Photoconsistency",
"Sparse",
"Segmentation",
"Plane Fitting"
],
"authors": [
{
"affiliation": "Comput. Vision Lab., ETH Zurich, Zurich, Switzerland",
"fullName": "András Bódis-Szomorú",
"givenName": "András",
"surname": "Bódis-Szomorú",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Comput. Vision Lab., ETH Zurich, Zurich, Switzerland",
"fullName": "Hayko Riemenschneider",
"givenName": "Hayko",
"surname": "Riemenschneider",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Comput. Vision Lab., ETH Zurich, Zurich, Switzerland",
"fullName": "Luc Van Gool",
"givenName": "Luc",
"surname": "Van Gool",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-06-01T00:00:00",
"pubType": "proceedings",
"pages": "469-476",
"year": "2014",
"issn": "1063-6919",
"isbn": "978-1-4799-5118-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5118a461",
"articleId": "12OmNy1SFI0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5118a477",
"articleId": "12OmNwFRp6g",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2012/2027/0/06266449",
"title": "A Dense 3D Reconstruction Approach from Uncalibrated Video Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2012/06266449/12OmNAlvHLm",
"parentPublication": {
"id": "proceedings/icmew/2012/2027/0",
"title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a293",
"title": "Incremental Reconstruction of Manifold Surface from Sparse Visual Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a293/12OmNAoDinI",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2001/1272/2/127220283",
"title": "Piecewise Planar Segmentation for Automatic Scene Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2001/127220283/12OmNB9bvlG",
"parentPublication": {
"id": "proceedings/cvpr/2001/1272/2",
"title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a029",
"title": "Depth Interpolation via Smooth Surface Segmentation Using Tangent Planes Based on the Superpixels of a Color Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a029/12OmNCeK2gP",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a264",
"title": "3D Surface Reconstruction from Point-and-Line Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a264/12OmNrAMEVf",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a860",
"title": "Approximate Cross Channel Color Mapping from Sparse Color Correspondences",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a860/12OmNwnH4Pk",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890288",
"title": "A new sparse feature-based patch for dense correspondence",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890288/12OmNyr8YpE",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459417",
"title": "Piecewise planar stereo for image-based rendering",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459417/12OmNz61d3q",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/08/08006257",
"title": "Piecewise-Planar StereoScan: Sequential Structure and Motion Using Plane Primitives",
"doi": null,
"abstractUrl": "/journal/tp/2018/08/08006257/13rRUEgaru7",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486548",
"title": "Dense Reconstruction from Monocular Slam with Fusion of Sparse Map-Points and Cnn-Inferred Depth",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486548/14jQfP7ey4y",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBaT60w",
"title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)",
"acronym": "fg",
"groupId": "1000065",
"volume": "1",
"displayVolume": "1",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvpw7mw",
"doi": "10.1109/FG.2015.7163142",
"title": "Dense 3D face alignment from 2D videos in real-time",
"normalizedTitle": "Dense 3D face alignment from 2D videos in real-time",
"abstract": "To enable real-time, person-independent 3D registration from 2D video, we developed a 3D cascade regression approach in which facial landmarks remain invariant across pose over a range of approximately 60 degrees. From a single 2D image of a person's face, a dense 3D shape is registered in real time for each frame. The algorithm utilizes a fast cascade regression framework trained on high-resolution 3D face-scans of posed and spontaneous emotion expression. The algorithm first estimates the location of a dense set of markers and their visibility, then reconstructs face shapes by fitting a part-based 3D model. Because no assumptions are required about illumination or surface properties, the method can be applied to a wide range of imaging conditions that include 2D video and uncalibrated multi-view video. The method has been validated in a battery of experiments that evaluate its precision of 3D reconstruction and extension to multi-view reconstruction. Experimental findings strongly support the validity of real-time, 3D registration and reconstruction from 2D video. The software is available online at http://zface.org.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To enable real-time, person-independent 3D registration from 2D video, we developed a 3D cascade regression approach in which facial landmarks remain invariant across pose over a range of approximately 60 degrees. From a single 2D image of a person's face, a dense 3D shape is registered in real time for each frame. The algorithm utilizes a fast cascade regression framework trained on high-resolution 3D face-scans of posed and spontaneous emotion expression. The algorithm first estimates the location of a dense set of markers and their visibility, then reconstructs face shapes by fitting a part-based 3D model. Because no assumptions are required about illumination or surface properties, the method can be applied to a wide range of imaging conditions that include 2D video and uncalibrated multi-view video. The method has been validated in a battery of experiments that evaluate its precision of 3D reconstruction and extension to multi-view reconstruction. Experimental findings strongly support the validity of real-time, 3D registration and reconstruction from 2D video. The software is available online at http://zface.org.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To enable real-time, person-independent 3D registration from 2D video, we developed a 3D cascade regression approach in which facial landmarks remain invariant across pose over a range of approximately 60 degrees. From a single 2D image of a person's face, a dense 3D shape is registered in real time for each frame. The algorithm utilizes a fast cascade regression framework trained on high-resolution 3D face-scans of posed and spontaneous emotion expression. The algorithm first estimates the location of a dense set of markers and their visibility, then reconstructs face shapes by fitting a part-based 3D model. Because no assumptions are required about illumination or surface properties, the method can be applied to a wide range of imaging conditions that include 2D video and uncalibrated multi-view video. The method has been validated in a battery of experiments that evaluate its precision of 3D reconstruction and extension to multi-view reconstruction. Experimental findings strongly support the validity of real-time, 3D registration and reconstruction from 2D video. The software is available online at http://zface.org.",
"fno": "07163142",
"keywords": [
"Three Dimensional Displays",
"Face",
"Solid Modeling",
"Shape",
"Videos",
"Image Reconstruction",
"Training"
],
"authors": [
{
"affiliation": "Robot. Inst., Carnegie Mellon Univ., Pittsburgh, PA, USA",
"fullName": "Laszlo A. Jeni",
"givenName": "Laszlo A.",
"surname": "Jeni",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Robot. Inst., Carnegie Mellon Univ., Pittsburgh, PA, USA",
"fullName": "Jeffrey F. Cohn",
"givenName": "Jeffrey F.",
"surname": "Cohn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Robot. Inst., Carnegie Mellon Univ., Pittsburgh, PA, USA",
"fullName": "Takeo Kanade",
"givenName": "Takeo",
"surname": "Kanade",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-05-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-6026-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07163141",
"articleId": "12OmNwGIcy4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07163143",
"articleId": "12OmNzCWG3B",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2014/4871/0/06918653",
"title": "Fast, robust and automatic 3D face model reconstruction from videos",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2014/06918653/12OmNBkP3F9",
"parentPublication": {
"id": "proceedings/avss/2014/4871/0",
"title": "2014 International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d694",
"title": "Pose-Invariant 3D Face Alignment",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d694/12OmNBuL1jJ",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/07780823",
"title": "Large-Pose Face Alignment via CNN-Based Dense 3D Model Fitting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/07780823/12OmNqBbHze",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/2/01394433",
"title": "Towards robust face recognition from multiple views",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394433/12OmNx6g6kx",
"parentPublication": {
"id": "proceedings/icme/2004/8603/2",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2018/2335/0/233501a780",
"title": "Evaluation of Dense 3D Reconstruction from 2D Face Images in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a780/12OmNyPQ4HL",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dicta/2008/3456/0/3456a365",
"title": "3D Face Reconstruction from 2D Images",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2008/3456a365/12OmNyk3008",
"parentPublication": {
"id": "proceedings/dicta/2008/3456/0",
"title": "2008 Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/03/08571265",
"title": "Joint Face Alignment and 3D Face Reconstruction with Application to Face Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2020/03/08571265/17D45WnnFYh",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f216",
"title": "Disentangling Features in 3D Face Shapes for Joint Face Reconstruction and Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f216/17D45WrVgfL",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300d090",
"title": "3D Face Shape Regression From 2D Videos with Multi-Reconstruction and Mesh Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300d090/1i5mA86Bi6c",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300d082",
"title": "The 2nd 3D Face Alignment in the Wild Challenge (3DFAW-Video): Dense Reconstruction From Video",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300d082/1i5muuh7S6s",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz61du5",
"doi": "10.1109/ICCV.2017.567",
"title": "Editable Parametric Dense Foliage from 3D Capture",
"normalizedTitle": "Editable Parametric Dense Foliage from 3D Capture",
"abstract": "We present an algorithm to compute parametric models of dense foliage. The guiding principles of our work are automatic reconstruction and compact artist friendly representation. We use Bezier patches to model leaf surface, which we compute from images and point clouds of dense foliage. We present an algorithm to segment individual leaves from colour and depth data. We then reconstruct the Bezier representation from segmented leaf points clouds using non-linear optimisation. Unlike previous work, we do not require laboratory scanned exemplars or user intervention. We also demonstrate intuitive manipulators to edit the reconstructed parametric models. We believe our work is a step towards making captured data more accessible to artists for foliage modelling.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an algorithm to compute parametric models of dense foliage. The guiding principles of our work are automatic reconstruction and compact artist friendly representation. We use Bezier patches to model leaf surface, which we compute from images and point clouds of dense foliage. We present an algorithm to segment individual leaves from colour and depth data. We then reconstruct the Bezier representation from segmented leaf points clouds using non-linear optimisation. Unlike previous work, we do not require laboratory scanned exemplars or user intervention. We also demonstrate intuitive manipulators to edit the reconstructed parametric models. We believe our work is a step towards making captured data more accessible to artists for foliage modelling.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an algorithm to compute parametric models of dense foliage. The guiding principles of our work are automatic reconstruction and compact artist friendly representation. We use Bezier patches to model leaf surface, which we compute from images and point clouds of dense foliage. We present an algorithm to segment individual leaves from colour and depth data. We then reconstruct the Bezier representation from segmented leaf points clouds using non-linear optimisation. Unlike previous work, we do not require laboratory scanned exemplars or user intervention. We also demonstrate intuitive manipulators to edit the reconstructed parametric models. We believe our work is a step towards making captured data more accessible to artists for foliage modelling.",
"fno": "1032f315",
"keywords": [
"Image Colour Analysis",
"Image Reconstruction",
"Image Segmentation",
"Solid Modelling",
"Colour Data",
"Parametric Models",
"Dense Foliage Images",
"Captured Data",
"User Intervention",
"Laboratory Scanned Exemplars",
"Nonlinear Optimisation",
"Segmented Leaf Points Clouds",
"Bezier Representation",
"Depth Data",
"Segment Individual",
"Point Clouds",
"Model Leaf Surface",
"Bezier Patches",
"Compact Artist Friendly Representation",
"Automatic Reconstruction",
"3 D Capture",
"Editable Parametric Dense Foliage",
"Foliage Modelling",
"Three Dimensional Displays",
"Parametric Statistics",
"Shape",
"Splines Mathematics",
"Image Reconstruction",
"Surface Reconstruction",
"Semantics"
],
"authors": [
{
"affiliation": null,
"fullName": "Paul Beardsley",
"givenName": "Paul",
"surname": "Beardsley",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gaurav Chaurasia",
"givenName": "Gaurav",
"surname": "Chaurasia",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "5315-5324",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032f306",
"articleId": "12OmNxG1yUL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032f325",
"articleId": "12OmNvsm6vz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgiv/2014/5720/0/5720a005",
"title": "An Approach to Describe Parametric Curves Using Hough-Based Arc Spline Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2014/5720a005/12OmNBv2CeI",
"parentPublication": {
"id": "proceedings/cgiv/2014/5720/0",
"title": "2014 11th International Conference on Computer Graphics, Imaging and Visualization (CGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2014/4284/0/4284a198",
"title": "Free-Form Deformation of Parametric Surfaces Based on Extension Function with Platform",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a198/12OmNC17hWN",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a126",
"title": "Scattered Data Points Fitting Using Ball B-Spline Curves Based on Particle Swarm Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a126/12OmNCga1Qh",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a701",
"title": "Filling Free-Form n-Sided Holes toward Blending and Decoration",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a701/12OmNz5apJL",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031a311",
"title": "A Universal Interpolation Algorithm for Parametric Curves",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031a311/12OmNzVXNIA",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/11/07378521",
"title": "Parametric Regression on the Grassmannian",
"doi": null,
"abstractUrl": "/journal/tp/2016/11/07378521/13rRUx0gegD",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08314702",
"title": "Efficient and Anti-Aliased Trimming for Rendering Large NURBS Models",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08314702/17D45VUZMUW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b052",
"title": "Dense 3D Point Cloud Reconstruction Using a Deep Pyramid Network",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b052/18j8MUuHgpG",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mascots/2019/4950/0/495000a309",
"title": "Detecting Parametric Dependencies for Performance Models Using Feature Selection Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/mascots/2019/495000a309/1gFJtMwvrjO",
"parentPublication": {
"id": "proceedings/mascots/2019/4950/0",
"title": "2019 IEEE 27th International Symposium on Modeling, Analysis, and Simulation of Computer and Telecommunication Systems (MASCOTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c232",
"title": "Moulding Humans: Non-Parametric 3D Human Shape Estimation From Single Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c232/1hVl9ihABwY",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzXFowZ",
"title": "2013 IEEE Workshop on Robot Vision (WORV 2013)",
"acronym": "worv",
"groupId": "1802687",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzSyCdb",
"doi": "10.1109/WORV.2013.6521928",
"title": "Dense range images from sparse point clouds using multi-scale processing",
"normalizedTitle": "Dense range images from sparse point clouds using multi-scale processing",
"abstract": "Multi-modal data processing based on visual and depth/range images has become relevant in computer vision for 3D reconstruction applications such as city modeling, robot navigation etc. In this paper, we generate high-accuracy dense range images from sparse point clouds to facilitate such applications. Our proposal addresses the problem of sparse data, mixed-pixels at the discontinuities and occlusions by combining multi-scale range images. The visual results show that our algorithm can create high-resolution dense range images with sharp discontinuities, while preserving the topology of objects even for environments that contain occlusions. To demonstrate the effectiveness of our approach, we propose an iterative perspective-to-point algorithm that aligns the edges between the color image and the range image from various viewpoints. The experimental results from 46 viewpoints show that the camera pose can be corrected when using high-accuracy dense range images, so that 3D reconstruction or 3D rendering can obtain a clearly higher quality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Multi-modal data processing based on visual and depth/range images has become relevant in computer vision for 3D reconstruction applications such as city modeling, robot navigation etc. In this paper, we generate high-accuracy dense range images from sparse point clouds to facilitate such applications. Our proposal addresses the problem of sparse data, mixed-pixels at the discontinuities and occlusions by combining multi-scale range images. The visual results show that our algorithm can create high-resolution dense range images with sharp discontinuities, while preserving the topology of objects even for environments that contain occlusions. To demonstrate the effectiveness of our approach, we propose an iterative perspective-to-point algorithm that aligns the edges between the color image and the range image from various viewpoints. The experimental results from 46 viewpoints show that the camera pose can be corrected when using high-accuracy dense range images, so that 3D reconstruction or 3D rendering can obtain a clearly higher quality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Multi-modal data processing based on visual and depth/range images has become relevant in computer vision for 3D reconstruction applications such as city modeling, robot navigation etc. In this paper, we generate high-accuracy dense range images from sparse point clouds to facilitate such applications. Our proposal addresses the problem of sparse data, mixed-pixels at the discontinuities and occlusions by combining multi-scale range images. The visual results show that our algorithm can create high-resolution dense range images with sharp discontinuities, while preserving the topology of objects even for environments that contain occlusions. To demonstrate the effectiveness of our approach, we propose an iterative perspective-to-point algorithm that aligns the edges between the color image and the range image from various viewpoints. The experimental results from 46 viewpoints show that the camera pose can be corrected when using high-accuracy dense range images, so that 3D reconstruction or 3D rendering can obtain a clearly higher quality.",
"fno": "06521928",
"keywords": [
"Image Edge Detection",
"Abstracts",
"Image Reconstruction",
"Image Resolution",
"Visualization"
],
"authors": [
{
"affiliation": "Eindhoven Univ. of Technolog, Eindhoven, Netherlands",
"fullName": "Luat Do",
"givenName": null,
"surname": "Luat Do",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Eindhoven Univ. of Technolog, Eindhoven, Netherlands",
"fullName": "Lingni Ma",
"givenName": null,
"surname": "Lingni Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Eindhoven Univ. of Technolog, Eindhoven, Netherlands",
"fullName": "P. H. N. de With",
"givenName": "P. H. N.",
"surname": "de With",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "worv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-01-01T00:00:00",
"pubType": "proceedings",
"pages": "138-143",
"year": "2013",
"issn": null,
"isbn": "978-1-4673-5646-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06521927",
"articleId": "12OmNrIJqCr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06521929",
"articleId": "12OmNB9KHsU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/1990/2062/1/00118205",
"title": "Detection of depth and orientation discontinuities in range images using mathematical morphology",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118205/12OmNApu5fF",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2007/2786/0/04228542",
"title": "Dense Stereo Range Sensing with Marching Pseudo-Random Patterns",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2007/04228542/12OmNrHSD0U",
"parentPublication": {
"id": "proceedings/crv/2007/2786/0",
"title": "Fourth Canadian Conference on Computer and Robot Vision (CRV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2920/0/00202023",
"title": "Detecting orientation discontinuities in range images by use of directional derivatives",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00202023/12OmNxUdv7L",
"parentPublication": {
"id": "proceedings/icpr/1992/2920/0",
"title": "11th IAPR International Conference on Pattern Recognition. Vol. III. Conference C: Image, Speech and Signal Analysis,",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1993/3870/0/00378227",
"title": "Vision-based construction of CAD models from range images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1993/00378227/12OmNxX3uPh",
"parentPublication": {
"id": "proceedings/iccv/1993/3870/0",
"title": "1993 (4th) International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890288",
"title": "A new sparse feature-based patch for dense correspondence",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890288/12OmNyr8YpE",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2015/8660/0/8660a113",
"title": "A Robust Quasi-Dense Wide Baseline Matching Method",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2015/8660a113/12OmNzV70NQ",
"parentPublication": {
"id": "proceedings/cis/2015/8660/0",
"title": "2015 11th International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a069",
"title": "Dense and Occlusion-Robust Multi-view Stereo for Unstructured Videos",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a069/12OmNzcPAyC",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545803",
"title": "Hallucinating Dense Optical Flow from Sparse Lidar for Autonomous Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545803/17D45WwsQ7c",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b052",
"title": "Dense 3D Point Cloud Reconstruction Using a Deep Pyramid Network",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b052/18j8MUuHgpG",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2005/2372/2/01467596",
"title": "A Markov random field approach for dense photometric stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2005/01467596/1htC5O3XIkw",
"parentPublication": {
"id": "proceedings/cvpr/2005/2372/2",
"title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "14qdcP8Ivdv",
"title": "2018 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45W9KVI9",
"doi": "10.1109/3DV.2018.00031",
"title": "SegmentedFusion: 3D Human Body Reconstruction Using Stitched Bounding Boxes",
"normalizedTitle": "SegmentedFusion: 3D Human Body Reconstruction Using Stitched Bounding Boxes",
"abstract": "This paper presents SegmentedFusion, a method possessing the capability of reconstructing non-rigid 3D models of a human body by using a single depth camera with skeleton information. Our method estimates a dense volumetric 6D motion field that warps the integrated model into the live frame by segmenting a human body into different parts and building a canonical space for each part. The key feature of this work is that a deformed and connected canonical volume for each part is created, and it is used to integrate data. The dense volumetric warp field of one volume is represented efficiently by blending a few rigid transformations. Overall, SegmentedFusion is able to scan a non-rigidly deformed human surface as well as to estimate the dense motion field by using a consumer-grade depth camera. The experimental results demonstrate that SegmentedFusion is robust against fast inter-frame motion and topological changes. Since our method does not require prior assumption, SegmentedFusion can be applied to a wide range of human motions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents SegmentedFusion, a method possessing the capability of reconstructing non-rigid 3D models of a human body by using a single depth camera with skeleton information. Our method estimates a dense volumetric 6D motion field that warps the integrated model into the live frame by segmenting a human body into different parts and building a canonical space for each part. The key feature of this work is that a deformed and connected canonical volume for each part is created, and it is used to integrate data. The dense volumetric warp field of one volume is represented efficiently by blending a few rigid transformations. Overall, SegmentedFusion is able to scan a non-rigidly deformed human surface as well as to estimate the dense motion field by using a consumer-grade depth camera. The experimental results demonstrate that SegmentedFusion is robust against fast inter-frame motion and topological changes. Since our method does not require prior assumption, SegmentedFusion can be applied to a wide range of human motions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents SegmentedFusion, a method possessing the capability of reconstructing non-rigid 3D models of a human body by using a single depth camera with skeleton information. Our method estimates a dense volumetric 6D motion field that warps the integrated model into the live frame by segmenting a human body into different parts and building a canonical space for each part. The key feature of this work is that a deformed and connected canonical volume for each part is created, and it is used to integrate data. The dense volumetric warp field of one volume is represented efficiently by blending a few rigid transformations. Overall, SegmentedFusion is able to scan a non-rigidly deformed human surface as well as to estimate the dense motion field by using a consumer-grade depth camera. The experimental results demonstrate that SegmentedFusion is robust against fast inter-frame motion and topological changes. Since our method does not require prior assumption, SegmentedFusion can be applied to a wide range of human motions.",
"fno": "842500a190",
"keywords": [
"Cameras",
"Image Fusion",
"Image Reconstruction",
"Image Representation",
"Image Segmentation",
"Motion Estimation",
"Solid Modelling",
"Consumer Grade Depth Camera",
"Segmented Fusion",
"Human Motions",
"Stitched Bounding Boxes",
"Single Depth Camera",
"Canonical Space",
"Dense Volumetric Warp Field",
"Human Body Segmentation",
"Nonrigid 3 D Model Reconstruction",
"3 D Human Body Reconstruction",
"Skeleton Information",
"Dense Volumetric 6 D Motion Field Estimation",
"Deformed Canonical Volume",
"Connected Canonical Volume",
"Three Dimensional Displays",
"Bones",
"Solid Modeling",
"Biological System Modeling",
"Image Reconstruction",
"Joints",
"Human Body",
"3 D Reconstruction",
"RGB D Cameras",
"Fast Motion",
"Body Part Segmentation",
"Skeleton"
],
"authors": [
{
"affiliation": null,
"fullName": "Shih Hsuan Yao",
"givenName": "Shih Hsuan",
"surname": "Yao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Diego Thomas",
"givenName": "Diego",
"surname": "Thomas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Akihiro Sugimoto",
"givenName": "Akihiro",
"surname": "Sugimoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shang-Hong Lai",
"givenName": "Shang-Hong",
"surname": "Lai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rin-Ichiro Taniguchi Kyushu",
"givenName": "Rin-Ichiro Taniguchi",
"surname": "Kyushu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-09-01T00:00:00",
"pubType": "proceedings",
"pages": "190-198",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-8425-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "842500a180",
"articleId": "17D45VTRoAb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "842500a199",
"articleId": "17D45XacGj6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/nicoint/2017/5332/0/5332a061",
"title": "Body-Shape Transfer for Super Deformation of 3D Character Models",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2017/5332a061/12OmNB8TU9V",
"parentPublication": {
"id": "proceedings/nicoint/2017/5332/0",
"title": "2017 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2016/4571/0/4571a271",
"title": "Robust Human Animation Skeleton Extraction Using Compatibility and Correctness Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a271/12OmNBkxssH",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2014/4284/0/4284a146",
"title": "A Fast 3-D Face Reconstruction Method",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a146/12OmNvjgWRV",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/2001/7237/0/00982390",
"title": "Interactive modeling of the human musculature",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2001/00982390/12OmNxRF74d",
"parentPublication": {
"id": "proceedings/ca/2001/7237/0",
"title": "Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07364274",
"title": "Parametric Human Body Reconstruction Based on Sparse Key Points",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07364274/13rRUygBw7d",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/08/07265099",
"title": "Realtime Reconstruction of an Animating Human Body from a Single Depth Camera",
"doi": null,
"abstractUrl": "/journal/tg/2016/08/07265099/13rRUygBwhL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a557",
"title": "A Structured Latent Space for Human Body Motion Generation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a557/1KYstZqofOE",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h738",
"title": "DeepHuman: 3D Human Reconstruction From a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h738/1hQqtWYyufS",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800g010",
"title": "TetraTSDF: 3D Human Reconstruction From a Single Image With a Tetrahedral Outer Shell",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800g010/1m3nuYPymoU",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a879",
"title": "PeeledHuman: Robust Shape Representation for Textured 3D Human Body Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a879/1qyxmAvhFD2",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmEmLKhf7q",
"doi": "10.1109/ICCV48922.2021.01071",
"title": "UltraPose: Synthesizing Dense Pose with 1 Billion Points by Human-body Decoupling 3D Model",
"normalizedTitle": "UltraPose: Synthesizing Dense Pose with 1 Billion Points by Human-body Decoupling 3D Model",
"abstract": "Recovering dense human poses from images plays a critical role in establishing an image-to-surface correspondence between RGB images and the 3D surface of the human body, serving the foundation of rich real-world applications, such as virtual humans, monocular-to-3d reconstruction. However, the popular DensePose-COCO dataset relies on a sophisticated manual annotation system, leading to severe limitations in acquiring the denser and more accurate annotated pose resources. In this work, we introduce a new 3D human-body model with a series of decoupled parameters that could freely control the generation of the body. Furthermore, we build a data generation system based on this decoupling 3D model, and construct an ultra dense synthetic benchmark UltraPose, containing around 1.3 billion corresponding points. Compared to the existing manually annotated DensePose-COCO dataset, the synthetic UltraPose has ultra dense image-to-surface correspondences without annotation cost and error. Our proposed UltraPose provides the largest benchmark and data resources for lifting the model capability in predicting more accurate dense poses. To promote future researches in this field, we also propose a transformer-based method to model the dense correspondence between 2D and 3D worlds. The proposed model trained on synthetic UltraPose can be applied to real-world scenarios, indicating the effectiveness of our benchmark and model.<sup>1</sup>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recovering dense human poses from images plays a critical role in establishing an image-to-surface correspondence between RGB images and the 3D surface of the human body, serving the foundation of rich real-world applications, such as virtual humans, monocular-to-3d reconstruction. However, the popular DensePose-COCO dataset relies on a sophisticated manual annotation system, leading to severe limitations in acquiring the denser and more accurate annotated pose resources. In this work, we introduce a new 3D human-body model with a series of decoupled parameters that could freely control the generation of the body. Furthermore, we build a data generation system based on this decoupling 3D model, and construct an ultra dense synthetic benchmark UltraPose, containing around 1.3 billion corresponding points. Compared to the existing manually annotated DensePose-COCO dataset, the synthetic UltraPose has ultra dense image-to-surface correspondences without annotation cost and error. Our proposed UltraPose provides the largest benchmark and data resources for lifting the model capability in predicting more accurate dense poses. To promote future researches in this field, we also propose a transformer-based method to model the dense correspondence between 2D and 3D worlds. The proposed model trained on synthetic UltraPose can be applied to real-world scenarios, indicating the effectiveness of our benchmark and model.<sup>1</sup>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recovering dense human poses from images plays a critical role in establishing an image-to-surface correspondence between RGB images and the 3D surface of the human body, serving the foundation of rich real-world applications, such as virtual humans, monocular-to-3d reconstruction. However, the popular DensePose-COCO dataset relies on a sophisticated manual annotation system, leading to severe limitations in acquiring the denser and more accurate annotated pose resources. In this work, we introduce a new 3D human-body model with a series of decoupled parameters that could freely control the generation of the body. Furthermore, we build a data generation system based on this decoupling 3D model, and construct an ultra dense synthetic benchmark UltraPose, containing around 1.3 billion corresponding points. Compared to the existing manually annotated DensePose-COCO dataset, the synthetic UltraPose has ultra dense image-to-surface correspondences without annotation cost and error. Our proposed UltraPose provides the largest benchmark and data resources for lifting the model capability in predicting more accurate dense poses. To promote future researches in this field, we also propose a transformer-based method to model the dense correspondence between 2D and 3D worlds. The proposed model trained on synthetic UltraPose can be applied to real-world scenarios, indicating the effectiveness of our benchmark and model.1",
"fno": "281200k0871",
"keywords": [
"Solid Modeling",
"Three Dimensional Displays",
"Annotations",
"Computational Modeling",
"Biological System Modeling",
"Benchmark Testing",
"Transformers",
"Datasets And Evaluation",
"3 D From A Single Image And Shape From X",
"Gestures And Body Pose"
],
"authors": [
{
"affiliation": "Beijing Momo Technology Co., Ltd.",
"fullName": "Haonan Yan",
"givenName": "Haonan",
"surname": "Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sun Yat-sen University",
"fullName": "Jiaqi Chen",
"givenName": "Jiaqi",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sun Yat-sen University",
"fullName": "Xujie Zhang",
"givenName": "Xujie",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Momo Technology Co., Ltd.",
"fullName": "Shengkai Zhang",
"givenName": "Shengkai",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Momo Technology Co., Ltd.",
"fullName": "Nianhong Jiao",
"givenName": "Nianhong",
"surname": "Jiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sun Yat-sen University",
"fullName": "Xiaodan Liang",
"givenName": "Xiaodan",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Momo Technology Co., Ltd.",
"fullName": "Tianxiang Zheng",
"givenName": "Tianxiang",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "10871-10880",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200k0860",
"articleId": "1BmLqkEC28U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200k0881",
"articleId": "1BmKIhRemJ2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tp/2018/07/07973095",
"title": "Dense 3D Face Correspondence",
"doi": null,
"abstractUrl": "/journal/tp/2018/07/07973095/13rRUwInv5K",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671553",
"title": "NTU-DensePose: A New Benchmark for Dense Pose Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671553/1A8hiBBLVbW",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1709",
"title": "DensePose 3D: Lifting Canonical Surface Maps of Articulated Objects to the Third Dimension",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1709/1BmFyrOUHhS",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c708",
"title": "Accurate 3D Body Shape Regression using Metric and Semantic Attributes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c708/1H0LftmVn5S",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600n3276",
"title": "BodyMap: Learning Full-Body Dense Correspondence Map",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3276/1H1keO8tbFe",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0196",
"title": "Gait Recognition in the Wild with Dense 3D Representations and A Benchmark",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0196/1H1n3hCGHle",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300b067",
"title": "Pushing the Envelope for RGB-Based Dense 3D Hand Pose Estimation via Neural Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300b067/1gyreIktw6Q",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h759",
"title": "DenseRaC: Joint 3D Pose and Shape Estimation by Dense Render-and-Compare",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h759/1hVl9YsJ0Tm",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300f339",
"title": "Delving Deep Into Hybrid Annotations for 3D Human Recovery in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300f339/1hVlIAdYNX2",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h052",
"title": "3D Human Mesh Regression With Dense Correspondence",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h052/1m3o9diJQAw",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1keO8tbFe",
"doi": "10.1109/CVPR52688.2022.01293",
"title": "BodyMap: Learning Full-Body Dense Correspondence Map",
"normalizedTitle": "BodyMap: Learning Full-Body Dense Correspondence Map",
"abstract": "Dense correspondence between humans carries powerful semantic information that can be utilized to solve fundamental problems for full-body understanding such as in-the-wild surface matching, tracking and reconstruction. In this paper we present BodyMap, a new framework for obtaining high-definition full-body and continuous dense correspondence between in-the-wild images of clothed humans and the surface of a 3D template model. The correspondences cover fine details such as hands and hair, while capturing regions far from the body surface, such as loose clothing. Prior methods for estimating such dense surface correspondence i) cut a 3D body into parts which are unwrapped to a 2D UV space, producing discontinuities along part seams, or ii) use a single surface for representing the whole body, but none handled body details. Here, we introduce a novel network architecture with Vision Transformers that learn fine-level features on a continuous body surface. BodyMap outperforms prior work on various metrics and datasets, including DensePose-COCO by a large margin. Furthermore, we show various applications ranging from multi-layer dense cloth correspondence, neural rendering with novel-view synthesis and appearance swapping.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dense correspondence between humans carries powerful semantic information that can be utilized to solve fundamental problems for full-body understanding such as in-the-wild surface matching, tracking and reconstruction. In this paper we present BodyMap, a new framework for obtaining high-definition full-body and continuous dense correspondence between in-the-wild images of clothed humans and the surface of a 3D template model. The correspondences cover fine details such as hands and hair, while capturing regions far from the body surface, such as loose clothing. Prior methods for estimating such dense surface correspondence i) cut a 3D body into parts which are unwrapped to a 2D UV space, producing discontinuities along part seams, or ii) use a single surface for representing the whole body, but none handled body details. Here, we introduce a novel network architecture with Vision Transformers that learn fine-level features on a continuous body surface. BodyMap outperforms prior work on various metrics and datasets, including DensePose-COCO by a large margin. Furthermore, we show various applications ranging from multi-layer dense cloth correspondence, neural rendering with novel-view synthesis and appearance swapping.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dense correspondence between humans carries powerful semantic information that can be utilized to solve fundamental problems for full-body understanding such as in-the-wild surface matching, tracking and reconstruction. In this paper we present BodyMap, a new framework for obtaining high-definition full-body and continuous dense correspondence between in-the-wild images of clothed humans and the surface of a 3D template model. The correspondences cover fine details such as hands and hair, while capturing regions far from the body surface, such as loose clothing. Prior methods for estimating such dense surface correspondence i) cut a 3D body into parts which are unwrapped to a 2D UV space, producing discontinuities along part seams, or ii) use a single surface for representing the whole body, but none handled body details. Here, we introduce a novel network architecture with Vision Transformers that learn fine-level features on a continuous body surface. BodyMap outperforms prior work on various metrics and datasets, including DensePose-COCO by a large margin. Furthermore, we show various applications ranging from multi-layer dense cloth correspondence, neural rendering with novel-view synthesis and appearance swapping.",
"fno": "694600n3276",
"keywords": [
"Clothing",
"Computational Geometry",
"Computer Vision",
"Feature Extraction",
"Image Matching",
"Image Motion Analysis",
"Image Reconstruction",
"Image Representation",
"Image Sequences",
"Image Texture",
"Learning Artificial Intelligence",
"Pose Estimation",
"Rendering Computer Graphics",
"Solid Modelling",
"Stereo Image Processing",
"Loose Clothing",
"Dense Surface Correspondence I",
"2 D UV Space",
"Part Seams",
"Single Surface",
"Body Details",
"Fine Level Features",
"Continuous Body Surface",
"Body Map",
"Multilayer Dense Cloth Correspondence",
"Full Body Dense Correspondence",
"Powerful Semantic Information",
"Full Body Understanding",
"High Definition Full Body",
"Continuous Dense Correspondence",
"In The Wild Images",
"Clothed Humans",
"3 D Template Model",
"Hair",
"Surface Reconstruction",
"Solid Modeling",
"Three Dimensional Displays",
"Tracking",
"Semantics",
"Pose Estimation",
"Network Architecture"
],
"authors": [
{
"affiliation": "Moscow Institute of Physics and Technology",
"fullName": "Anastasia Ianina",
"givenName": "Anastasia",
"surname": "Ianina",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Meta Reality Labs Research,Sausalito",
"fullName": "Nikolaos Sarafianos",
"givenName": "Nikolaos",
"surname": "Sarafianos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Meta Reality Labs Research,Sausalito",
"fullName": "Yuanlu Xu",
"givenName": "Yuanlu",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Meta AI",
"fullName": "Ignacio Rocco",
"givenName": "Ignacio",
"surname": "Rocco",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Meta Reality Labs Research,Sausalito",
"fullName": "Tony Tung",
"givenName": "Tony",
"surname": "Tung",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "13276-13285",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H1kesw7jsA",
"name": "pcvpr202269460-09878575s1-mm_694600n3276.zip",
"size": "18.5 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878575s1-mm_694600n3276.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600n3264",
"articleId": "1H1mTqlw5pe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600n3286",
"articleId": "1H1mmo6BS1O",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2009/4442/0/05457426",
"title": "Posture invariant correspondence of triangular meshes in shape space",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457426/12OmNzVoBPz",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000h297",
"title": "DensePose: Dense Human Pose Estimation in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000h297/17D45WZZ7Dy",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200k0871",
"title": "UltraPose: Synthesizing Dense Pose with 1 Billion Points by Human-body Decoupling 3D Model",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200k0871/1BmEmLKhf7q",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g739",
"title": "SurfEmb: Dense and Continuous Correspondence Distributions for Object Pose Estimation with Learnt Surface Embeddings",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g739/1H0N66LW3U4",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5862",
"title": "Surface-Aligned Neural Radiance Fields for Controllable 3D Human Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5862/1H1i16qJfFu",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600n3264",
"title": "Capturing and Inferring Dense Full-Body Human-Scene Contact",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3264/1H1mTqlw5pe",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0876",
"title": "HoloPose: Holistic 3D Human Reconstruction In-The-Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0876/1gys6xE6zjG",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h052",
"title": "3D Human Mesh Regression With Dense Correspondence",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h052/1m3o9diJQAw",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/05/09279291",
"title": "Learning 3D Human Shape and Pose From Dense Body Parts",
"doi": null,
"abstractUrl": "/journal/tp/2022/05/09279291/1pg8uVy3PjO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/11/09565319",
"title": "DPODv2: Dense Correspondence-Based 6 DoF Pose Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2022/11/09565319/1xx87EXOVOw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1mBvBI7iU",
"doi": "10.1109/CVPR52688.2022.01251",
"title": "SPAMs: Structured Implicit Parametric Models",
"normalizedTitle": "SPAMs: Structured Implicit Parametric Models",
"abstract": "Parametric 3D models have formed a fundamental role in modeling deformable objects, such as human bodies, faces, and hands; however, the construction of such parametric models requires significant manual intervention and domain expertise. Recently, neural implicit 3D representations have shown great expressibility in capturing 3D shape geometry. We observe that deformable object motion is often semantically structured, and thus propose to learn Structured-implicit PArametric Models (SPAMs) as a deformable object representation that structurally decomposes non-rigid object motion into part-based disentangled representations of shape and pose, with each being represented by deep implicit functions. This enables a structured characterization of object movement, with part decomposition characterizing a lower-dimensional space in which we can establish coarse motion correspondence. In particular, we can leverage the part decompositions at test time to fit to new depth sequences of unobserved shapes, by establishing part correspondences between the input observation and our learned part spaces; this guides a robust joint optimization between the shape and pose of all parts, even under dramatic motion sequences. Experiments demonstrate that our part-aware shape and pose understanding lead to state-of-the-art performance in reconstruction and tracking of depth sequences of complex deforming object motion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Parametric 3D models have formed a fundamental role in modeling deformable objects, such as human bodies, faces, and hands; however, the construction of such parametric models requires significant manual intervention and domain expertise. Recently, neural implicit 3D representations have shown great expressibility in capturing 3D shape geometry. We observe that deformable object motion is often semantically structured, and thus propose to learn Structured-implicit PArametric Models (SPAMs) as a deformable object representation that structurally decomposes non-rigid object motion into part-based disentangled representations of shape and pose, with each being represented by deep implicit functions. This enables a structured characterization of object movement, with part decomposition characterizing a lower-dimensional space in which we can establish coarse motion correspondence. In particular, we can leverage the part decompositions at test time to fit to new depth sequences of unobserved shapes, by establishing part correspondences between the input observation and our learned part spaces; this guides a robust joint optimization between the shape and pose of all parts, even under dramatic motion sequences. Experiments demonstrate that our part-aware shape and pose understanding lead to state-of-the-art performance in reconstruction and tracking of depth sequences of complex deforming object motion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Parametric 3D models have formed a fundamental role in modeling deformable objects, such as human bodies, faces, and hands; however, the construction of such parametric models requires significant manual intervention and domain expertise. Recently, neural implicit 3D representations have shown great expressibility in capturing 3D shape geometry. We observe that deformable object motion is often semantically structured, and thus propose to learn Structured-implicit PArametric Models (SPAMs) as a deformable object representation that structurally decomposes non-rigid object motion into part-based disentangled representations of shape and pose, with each being represented by deep implicit functions. This enables a structured characterization of object movement, with part decomposition characterizing a lower-dimensional space in which we can establish coarse motion correspondence. In particular, we can leverage the part decompositions at test time to fit to new depth sequences of unobserved shapes, by establishing part correspondences between the input observation and our learned part spaces; this guides a robust joint optimization between the shape and pose of all parts, even under dramatic motion sequences. Experiments demonstrate that our part-aware shape and pose understanding lead to state-of-the-art performance in reconstruction and tracking of depth sequences of complex deforming object motion.",
"fno": "694600m2841",
"keywords": [
"Computational Geometry",
"Image Motion Analysis",
"Image Reconstruction",
"Image Representation",
"Image Sequences",
"Knowledge Based Systems",
"Learning Artificial Intelligence",
"Optimisation",
"Solid Modelling",
"SPA Ms",
"Parametric 3 D Models",
"Deformable Objects",
"Human Bodies",
"Significant Manual Intervention",
"Domain Expertise",
"Neural Implicit 3 D Representations",
"Great Expressibility",
"Capturing 3 D",
"Deformable Object Motion",
"Structured Implicit P Arametric Models",
"Deformable Object Representation",
"Nonrigid Object Motion",
"Part Based Disentangled Representations",
"Deep Implicit Functions",
"Structured Characterization",
"Object Movement",
"Part Decomposition",
"Lower Dimensional Space",
"Coarse Motion Correspondence",
"Depth Sequences",
"Unobserved Shapes",
"Part Correspondences",
"Learned Part Spaces",
"Dramatic Motion Sequences",
"Part Aware Shape",
"Complex Deforming Object Motion",
"Deformable Models",
"Solid Modeling",
"Three Dimensional Displays",
"Shape",
"Tracking",
"Robustness",
"Parametric Statistics"
],
"authors": [
{
"affiliation": "Technical University of Munich",
"fullName": "Pablo Palafox",
"givenName": "Pablo",
"surname": "Palafox",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Meta Reality Labs Research,Sausalito,USA",
"fullName": "Nikolaos Sarafianos",
"givenName": "Nikolaos",
"surname": "Sarafianos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Meta Reality Labs Research,Sausalito,USA",
"fullName": "Tony Tung",
"givenName": "Tony",
"surname": "Tung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Munich",
"fullName": "Angela Dai",
"givenName": "Angela",
"surname": "Dai",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "12841-12850",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H1mBs6QzVC",
"name": "pcvpr202269460-09878900s1-mm_694600m2841.zip",
"size": "1.62 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878900s1-mm_694600m2841.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600m2830",
"articleId": "1H1kbtVJZbq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600m2851",
"articleId": "1H1kFc1BMLS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/1989/1952/0/00037834",
"title": "Parametrically deformable contour models",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1989/00037834/12OmNAKcNNy",
"parentPublication": {
"id": "proceedings/cvpr/1989/1952/0",
"title": "1989 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1996/7258/0/72580293",
"title": "Global Models with Parametric Offsets as Applied to Cardiac Motion Recovery",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1996/72580293/12OmNAoDiln",
"parentPublication": {
"id": "proceedings/cvpr/1996/7258/0",
"title": "Proceedings CVPR IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6265/1/00576380",
"title": "Shape approximation: from multiview range images to parametric geons",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576380/12OmNvw2Te2",
"parentPublication": {
"id": "proceedings/icpr/1994/6265/1",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118b813",
"title": "Automatic Construction of Deformable Models In-the-Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118b813/12OmNwpXRUq",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2001/1272/1/127210485",
"title": "Improving the Scope of Deformable Model Shape and Motion Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2001/127210485/12OmNxG1yJr",
"parentPublication": {
"id": "proceedings/cvpr/2001/1272/1",
"title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a756",
"title": "Learning Shape, Motion and Elastic Models in Force Space",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a756/12OmNyrIayP",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2006/2825/0/04155736",
"title": "Deformable Mesh Model for Complex Multi-Object 3D Motion Estimation from Multi-Viewpoint Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2006/04155736/12OmNzCWG3v",
"parentPublication": {
"id": "proceedings/3dpvt/2006/2825/0",
"title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2001/05/i0475",
"title": "Deformable Shape Detection and Description via Model-Based Region Grouping",
"doi": null,
"abstractUrl": "/journal/tp/2001/05/i0475/13rRUxAASXc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2000/05/i0544",
"title": "Object Tracking Using Deformable Templates",
"doi": null,
"abstractUrl": "/journal/tp/2000/05/i0544/13rRUxBa5sM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000b170",
"title": "Learning Hierarchical Models for Class-Specific Reconstruction from Natural Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000b170/17D45WZZ7DD",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxjjEbs",
"title": "2011 International Conference on Business Computing and Global Informatization",
"acronym": "bcgin",
"groupId": "1800481",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAkWvE6",
"doi": "10.1109/BCGIn.2011.84",
"title": "Fast and High Accuracy Numerical Methods for Solving PDEs in Computational Finance",
"normalizedTitle": "Fast and High Accuracy Numerical Methods for Solving PDEs in Computational Finance",
"abstract": "We develop a new W-cycle multiscale multigrid method that can use the existing multilevel (different scale) grid hierarchy to approximate the sixth order solution of Poisson equation based on the fourth order discretization schemes. Richardson extrapolation procedure is used on the fine grid level in multigrid method. Numerical results are conducted to show the solution accuracy and the computational efficiency of our new method, compared to Wang-Zhang's sixth order multiscale multigrid method using V-cycle.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We develop a new W-cycle multiscale multigrid method that can use the existing multilevel (different scale) grid hierarchy to approximate the sixth order solution of Poisson equation based on the fourth order discretization schemes. Richardson extrapolation procedure is used on the fine grid level in multigrid method. Numerical results are conducted to show the solution accuracy and the computational efficiency of our new method, compared to Wang-Zhang's sixth order multiscale multigrid method using V-cycle.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We develop a new W-cycle multiscale multigrid method that can use the existing multilevel (different scale) grid hierarchy to approximate the sixth order solution of Poisson equation based on the fourth order discretization schemes. Richardson extrapolation procedure is used on the fine grid level in multigrid method. Numerical results are conducted to show the solution accuracy and the computational efficiency of our new method, compared to Wang-Zhang's sixth order multiscale multigrid method using V-cycle.",
"fno": "4464a307",
"keywords": [
"Multigrid Method",
"Poisson Equation",
"W Cycle"
],
"authors": [
{
"affiliation": null,
"fullName": "Yin Wang",
"givenName": "Yin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kun Hua",
"givenName": "Kun",
"surname": "Hua",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jun Zhang",
"givenName": "Jun",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bcgin",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-07-01T00:00:00",
"pubType": "proceedings",
"pages": "307-310",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4464-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4464a304",
"articleId": "12OmNwqft05",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4464a311",
"articleId": "12OmNASraEw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cmpeur/1989/1940/0/00093482",
"title": "Locally adaptive multigrid method for 3D numerical investigation of semiconductor devices",
"doi": null,
"abstractUrl": "/proceedings-article/cmpeur/1989/00093482/12OmNvA1hzc",
"parentPublication": {
"id": "proceedings/cmpeur/1989/1940/0",
"title": "COMPEURO 89 Proceedings VLSI and Computer Peripherals.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07364293",
"title": "Solving the Fluid Pressure Poisson Equation Using Multigrid—Evaluation and Improvements",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07364293/13rRUwvBy8Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1990/05/i0435",
"title": "Direct Analytical Methods for Solving Poisson Equations in Computer Vision Problems",
"doi": null,
"abstractUrl": "/journal/tp/1990/05/i0435/13rRUx0xPUF",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/06171181",
"title": "A Multigrid Fluid Pressure Solver Handling Separating Solid Boundary Conditions",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/06171181/13rRUxlgxTi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCaLEmR",
"title": "2012 Fourth International Conference on Computational and Information Sciences",
"acronym": "iccis",
"groupId": "1800262",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCbU2S7",
"doi": "10.1109/ICCIS.2012.17",
"title": "A High Order Compact Difference Scheme and Multigrid Method for Solving the 3D Convection Diffusion Equation on Non-uniform Grids",
"normalizedTitle": "A High Order Compact Difference Scheme and Multigrid Method for Solving the 3D Convection Diffusion Equation on Non-uniform Grids",
"abstract": "A high order compact (HOC) difference scheme is proposed to solve the 3D convection diffusion equation on non-uniform Cartesian grids involving no transformation from the physical space to the computational space. A multigrid method based on this HOC scheme is developed to solve the linear system arising from the difference equation. Numerical experiments about a boundary layer problem are conducted to show the computed accuracy of the HOC scheme and the computational efficiency of the multigrid method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A high order compact (HOC) difference scheme is proposed to solve the 3D convection diffusion equation on non-uniform Cartesian grids involving no transformation from the physical space to the computational space. A multigrid method based on this HOC scheme is developed to solve the linear system arising from the difference equation. Numerical experiments about a boundary layer problem are conducted to show the computed accuracy of the HOC scheme and the computational efficiency of the multigrid method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A high order compact (HOC) difference scheme is proposed to solve the 3D convection diffusion equation on non-uniform Cartesian grids involving no transformation from the physical space to the computational space. A multigrid method based on this HOC scheme is developed to solve the linear system arising from the difference equation. Numerical experiments about a boundary layer problem are conducted to show the computed accuracy of the HOC scheme and the computational efficiency of the multigrid method.",
"fno": "4789a714",
"keywords": [
"Equations",
"Multigrid Methods",
"Accuracy",
"Interpolation",
"Strontium",
"Mathematical Model",
"Computational Efficiency",
"Boundary Layer",
"Convection Diffusion Equations",
"High Order Compact Scheme",
"Non Uniform Grids",
"Multigrid Method"
],
"authors": [
{
"affiliation": null,
"fullName": "Yongbin Ge",
"givenName": "Yongbin",
"surname": "Ge",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Fujun Cao",
"givenName": "Fujun",
"surname": "Cao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-08-01T00:00:00",
"pubType": "proceedings",
"pages": "714-717",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-2406-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4789a710",
"articleId": "12OmNx7G5RE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4789a718",
"articleId": "12OmNvT2oW9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iscid/2010/4198/1/4198a230",
"title": "A High Accuracy Compact Difference on Non-uniform Grid for 2D Convection Diffusion Reaction Equation",
"doi": null,
"abstractUrl": "/proceedings-article/iscid/2010/4198a230/12OmNBU1jKa",
"parentPublication": {
"id": "proceedings/iscid/2010/4198/1",
"title": "Computational Intelligence and Design, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2009/3605/2/3605c641",
"title": "An Implicit Finite Difference Scheme with Preconditioning for Convection Dominated Diffusion Equation",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2009/3605c641/12OmNwt5skm",
"parentPublication": {
"id": "proceedings/cso/2009/3605/2",
"title": "2009 International Joint Conference on Computational Sciences and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcs/2005/2343/0/01430049",
"title": "The study and implementation of the multigrid algorithm for the 3-D transport difference equation",
"doi": null,
"abstractUrl": "/proceedings-article/hpcs/2005/01430049/12OmNyKa69k",
"parentPublication": {
"id": "proceedings/hpcs/2005/2343/0",
"title": "19th International Symposium on High Performance Computing Systems and Applications (HPCS'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ftcs/1999/0213/0/02130012",
"title": "An Algorithm Based Error Detection Scheme for the Multigrid Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ftcs/1999/02130012/12OmNynJMIU",
"parentPublication": {
"id": "proceedings/ftcs/1999/0213/0",
"title": "Fault-Tolerant Computing, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccie/2010/4026/1/4026a227",
"title": "A High Accuracy Difference Scheme of the Convection Equation",
"doi": null,
"abstractUrl": "/proceedings-article/ccie/2010/4026a227/12OmNzSQdsn",
"parentPublication": {
"id": "proceedings/ccie/2010/4026/1",
"title": "Computing, Control and Industrial Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2011/4501/0/4501b087",
"title": "A High-Order Compact ADI Scheme for the 3D Unsteady Convection-Diffusion Equation",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2011/4501b087/12OmNzcxZoc",
"parentPublication": {
"id": "proceedings/iccis/2011/4501/0",
"title": "2011 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2003/09/t1089",
"title": "An Algorithm-Based Error Detection Scheme for the Multigrid Method",
"doi": null,
"abstractUrl": "/journal/tc/2003/09/t1089/13rRUEgs2Lb",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07364293",
"title": "Solving the Fluid Pressure Poisson Equation Using Multigrid—Evaluation and Improvements",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07364293/13rRUwvBy8Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1986/11/01676698",
"title": "Multigrid Algorithms on the Hypercube Multiprocessor",
"doi": null,
"abstractUrl": "/journal/tc/1986/11/01676698/13rRUxlgy2A",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2006/06/c6012",
"title": "Why Multigrid Methods Are So Efficient",
"doi": null,
"abstractUrl": "/magazine/cs/2006/06/c6012/13rRUy2YLOO",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxWcHef",
"title": "COMPEURO 89 Proceedings VLSI and Computer Peripherals.",
"acronym": "cmpeur",
"groupId": "1000110",
"volume": "0",
"displayVolume": "0",
"year": "1989",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvA1hzc",
"doi": "10.1109/CMPEUR.1989.93482",
"title": "Locally adaptive multigrid method for 3D numerical investigation of semiconductor devices",
"normalizedTitle": "Locally adaptive multigrid method for 3D numerical investigation of semiconductor devices",
"abstract": "The authors present recent developments in the multigrid semiconductor device simulation program COGITO. A locally adaptive refinement strategy has been implemented. The electron and hole continuity equations have been incorporated into the solution procedure. Refinement criteria and interpolation topics are discussed in particular. The solution of a three-dimensional problem is presented. It is demonstrated that the Poisson equation in one to three dimensions for the zero-current case can be solved. Steep gradients are well resolved by adaptive refinement. The full classical equation system including the continuity equations can be solved in one dimension under a selected bias.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The authors present recent developments in the multigrid semiconductor device simulation program COGITO. A locally adaptive refinement strategy has been implemented. The electron and hole continuity equations have been incorporated into the solution procedure. Refinement criteria and interpolation topics are discussed in particular. The solution of a three-dimensional problem is presented. It is demonstrated that the Poisson equation in one to three dimensions for the zero-current case can be solved. Steep gradients are well resolved by adaptive refinement. The full classical equation system including the continuity equations can be solved in one dimension under a selected bias.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The authors present recent developments in the multigrid semiconductor device simulation program COGITO. A locally adaptive refinement strategy has been implemented. The electron and hole continuity equations have been incorporated into the solution procedure. Refinement criteria and interpolation topics are discussed in particular. The solution of a three-dimensional problem is presented. It is demonstrated that the Poisson equation in one to three dimensions for the zero-current case can be solved. Steep gradients are well resolved by adaptive refinement. The full classical equation system including the continuity equations can be solved in one dimension under a selected bias.",
"fno": "00093482",
"keywords": [
"Circuit Analysis Computing",
"Interpolation",
"Semiconductor Devices",
"Locally Adaptive Multigrid Method",
"Electron Continuity Equations",
"Steep Gradients",
"3 D Numerical Investigation",
"Semiconductor Devices",
"Simulation Program COGITO",
"Adaptive Refinement Strategy",
"Hole Continuity Equations",
"Interpolation",
"Poisson Equation",
"Multigrid Methods",
"Semiconductor Devices",
"Tree Data Structures",
"Grid Computing",
"Computational Modeling",
"Poisson Equations",
"Charge Carrier Processes",
"Interpolation",
"Numerical Analysis",
"Very Large Scale Integration"
],
"authors": [
{
"affiliation": "Dept. of Tech. Electron., Tech. Univ., Hamburg-Harburg, West Germany",
"fullName": "P. Conradi",
"givenName": "P.",
"surname": "Conradi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Tech. Electron., Tech. Univ., Hamburg-Harburg, West Germany",
"fullName": "D. Schroeder",
"givenName": "D.",
"surname": "Schroeder",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cmpeur",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1989-01-01T00:00:00",
"pubType": "proceedings",
"pages": "5/52-5/54",
"year": "1989",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00093481",
"articleId": "12OmNCu4ndB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00093483",
"articleId": "12OmNzDehbX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bcgin/2011/4464/0/4464a307",
"title": "Fast and High Accuracy Numerical Methods for Solving PDEs in Computational Finance",
"doi": null,
"abstractUrl": "/proceedings-article/bcgin/2011/4464a307/12OmNAkWvE6",
"parentPublication": {
"id": "proceedings/bcgin/2011/4464/0",
"title": "2011 International Conference on Business Computing and Global Informatization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1989/1933/0/00072509",
"title": "Partitioned-charge-based modeling: a new approach",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1989/00072509/12OmNAmE5Z3",
"parentPublication": {
"id": "proceedings/ssst/1989/1933/0",
"title": "1989 The Twenty-First Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/physics/1995/7321/0/73210043",
"title": "Simulation of semiconductor plasma on multiprocessor computer systems",
"doi": null,
"abstractUrl": "/proceedings-article/physics/1995/73210043/12OmNBSBk7A",
"parentPublication": {
"id": "proceedings/physics/1995/7321/0",
"title": "Physics and Modeling of Devices Based on Low-Dimensional Structures, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fmpc/1988/5892/0/00047432",
"title": "Parallel algorithm for the solution of nonlinear Poisson equation of semiconductor device theory and its implementation on the MPP",
"doi": null,
"abstractUrl": "/proceedings-article/fmpc/1988/00047432/12OmNvSbBBo",
"parentPublication": {
"id": "proceedings/fmpc/1988/5892/0",
"title": "Proceedings 2nd Symposium on the Frontiers of Massively Parallel Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccad/1993/4490/0/00580068",
"title": "Accelerated waveform methods for parallel transient simulation of semiconductor devices",
"doi": null,
"abstractUrl": "/proceedings-article/iccad/1993/00580068/12OmNx6xHlC",
"parentPublication": {
"id": "proceedings/iccad/1993/4490/0",
"title": "Proceedings of 1993 International Conference on Computer Aided Design (ICCAD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icime/2009/3595/0/3595a108",
"title": "Finite Elements in Semiconductor Devices",
"doi": null,
"abstractUrl": "/proceedings-article/icime/2009/3595a108/12OmNzdoMCq",
"parentPublication": {
"id": "proceedings/icime/2009/3595/0",
"title": "Information Management and Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567208",
"title": "Investigation of mode amplification in QD semiconductor optical amplifier",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567208/12OmNzmclRy",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2011/01/ttd2011010022",
"title": "Cyclic Reduction Tridiagonal Solvers on GPUs Applied to Mixed-Precision Multigrid",
"doi": null,
"abstractUrl": "/journal/td/2011/01/ttd2011010022/13rRUwd9CFF",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07364293",
"title": "Solving the Fluid Pressure Poisson Equation Using Multigrid—Evaluation and Improvements",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07364293/13rRUwvBy8Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fit/2019/6625/0/662500a061",
"title": "Numerical Modeling of One Dimensional Unsteady Flow in Parabolic Channel",
"doi": null,
"abstractUrl": "/proceedings-article/fit/2019/662500a061/1hrJxtfIDMk",
"parentPublication": {
"id": "proceedings/fit/2019/6625/0",
"title": "2019 International Conference on Frontiers of Information Technology (FIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBhpS6N",
"title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)",
"acronym": "iih-msp",
"groupId": "1001543",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyoiYXH",
"doi": "10.1109/IIH-MSP.2008.28",
"title": "Image Editing without Color Inconsistency Using Modified Poisson Equation",
"normalizedTitle": "Image Editing without Color Inconsistency Using Modified Poisson Equation",
"abstract": "We propose an improved seamless image editing method based on the Poisson equation. By adding an additional inner Dirichlet boundary condition and magnify large Laplacian values corresponding to the objectpsilas true contour in the edited region, the method can insert objects into background of the target image seamlessly, and solve the color inconsistency problem caused by boundary influence. The proposed method does not require precise definition of the object contour, but only needs to choose a rough region of the inserted object and the target location for automatic processing. Experimental results show that the method can nicely insert objects from a different image or a different region of the same image into a targeted background area without causing color inconsistency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose an improved seamless image editing method based on the Poisson equation. By adding an additional inner Dirichlet boundary condition and magnify large Laplacian values corresponding to the objectpsilas true contour in the edited region, the method can insert objects into background of the target image seamlessly, and solve the color inconsistency problem caused by boundary influence. The proposed method does not require precise definition of the object contour, but only needs to choose a rough region of the inserted object and the target location for automatic processing. Experimental results show that the method can nicely insert objects from a different image or a different region of the same image into a targeted background area without causing color inconsistency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose an improved seamless image editing method based on the Poisson equation. By adding an additional inner Dirichlet boundary condition and magnify large Laplacian values corresponding to the objectpsilas true contour in the edited region, the method can insert objects into background of the target image seamlessly, and solve the color inconsistency problem caused by boundary influence. The proposed method does not require precise definition of the object contour, but only needs to choose a rough region of the inserted object and the target location for automatic processing. Experimental results show that the method can nicely insert objects from a different image or a different region of the same image into a targeted background area without causing color inconsistency.",
"fno": "3278a397",
"keywords": [
"Image Colour Analysis",
"Laplace Equations",
"Poisson Equation",
"Color Inconsistency",
"Poisson Equation",
"Image Editing Method",
"Inner Dirichlet Boundary Condition",
"Laplacian Values",
"Image Color Analysis",
"Poisson Equations",
"Boundary Conditions",
"Equations",
"Mathematical Model",
"Interpolation",
"Pixel",
"Color Inconsistency"
],
"authors": [
{
"affiliation": "Sch. of Commun. & Inf. Eng., Shanghai Univ., Shanghai",
"fullName": "Chuan Qin",
"givenName": "Chuan",
"surname": "Qin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Commun. & Inf. Eng., Shanghai Univ., Shanghai",
"fullName": "Shuozhong Wang",
"givenName": "Shuozhong",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Commun. & Inf. Eng., Shanghai Univ., Shanghai",
"fullName": "Xinpeng Zhang",
"givenName": "Xinpeng",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iih-msp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-08-01T00:00:00",
"pubType": "proceedings",
"pages": "397-401",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3278-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3278a327",
"articleId": "12OmNzdGnsS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3278a331",
"articleId": "12OmNrJRPnU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdh/2014/4284/0/4284a101",
"title": "Image Edit by Boundary Difference Propagation",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a101/12OmNAhxjB9",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/superc/2003/695/0/01592926",
"title": "SCALLOP: A Highly Scalable Parallel Poisson Solver in Three Dimensions",
"doi": null,
"abstractUrl": "/proceedings-article/superc/2003/01592926/12OmNAtaS2i",
"parentPublication": {
"id": "proceedings/superc/2003/695/0",
"title": "ACM/IEEE SC 2003 Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1991/2148/0/00139751",
"title": "Boundary element methods for solving Poisson equations in computer vision problems",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1991/00139751/12OmNBd9T57",
"parentPublication": {
"id": "proceedings/cvpr/1991/2148/0",
"title": "Proceedings. 1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/2/01315145",
"title": "Shape representation and classification using the Poisson equation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315145/12OmNqBKU9B",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/2",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223239",
"title": "On Poisson solvers and semi-direct methods for computing area based optical flow",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223239/12OmNwbuke1",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mines/2010/4258/0/4258a902",
"title": "Secure Image Delivery Scheme Using Poisson Editing",
"doi": null,
"abstractUrl": "/proceedings-article/mines/2010/4258a902/12OmNzyYib7",
"parentPublication": {
"id": "proceedings/mines/2010/4258/0",
"title": "Multimedia Information Networking and Security, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2006/04/c4032",
"title": "Numerical Solution of the Stationary State Schrödinger Equation Using Transparent Boundary Conditions",
"doi": null,
"abstractUrl": "/magazine/cs/2006/04/c4032/13rRUEgs2PW",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/02/08449116",
"title": "Poisson Vector Graphics (PVG)",
"doi": null,
"abstractUrl": "/journal/tg/2020/02/08449116/13rRUyeCkaq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icftic/2022/2195/0/10075200",
"title": "Study on the Fourth Order Discrete Scheme of Finite Difference Method Based on Node Set Vector for Two Dimensional Poisson Equation",
"doi": null,
"abstractUrl": "/proceedings-article/icftic/2022/10075200/1LRl0zXfIJi",
"parentPublication": {
"id": "proceedings/icftic/2022/2195/0",
"title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2003/2113/0/01592926",
"title": "SCALLOP: A Highly Scalable Parallel Poisson Solver in Three Dimensions",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2003/01592926/1MEX1c5niww",
"parentPublication": {
"id": "proceedings/sc/2003/2113/0",
"title": "SC Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "18jXJQCMYNi",
"title": "2018 IEEE/ACM 9th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (scalA)",
"acronym": "scala",
"groupId": "1805696",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "18jXKefKL6g",
"doi": "10.1109/ScalA.2018.00006",
"title": "Communication Avoiding Multigrid Preconditioned Conjugate Gradient Method for Extreme Scale Multiphase CFD Simulations",
"normalizedTitle": "Communication Avoiding Multigrid Preconditioned Conjugate Gradient Method for Extreme Scale Multiphase CFD Simulations",
"abstract": "A communication avoiding (CA) multigrid preconditioned conjugate gradient method (CAMGCG) is applied to the pressure Poisson equation in a multiphase CFD code JUPITER, and its computational performance and convergence property are compared against CA Krylov methods. A new geometric multigrid preconditioner is developed using a preconditioned Chebyshev iteration smoother, in which no global reduction communication is needed, halo data communication is reduced by a mixed precision approach, and eigenvalues are computed using the CA Lanczos method. In the JUPITER code, the CAMGCG solver has robust convergence properties regardless of the problem size, and shows both communication reduction and convergence improvement, leading to higher performance gain than CA Krylov solvers, which achieve only the former. The CAMGCG solver is applied to extreme scale multiphase CFD simulations with ~ 90 billion DOFs, and it is shown that compared with a preconditioned CG solver, the number of iterations, and thus, All_Reduce is reduced to ~ 1/800, and ~ 11.6× speedup is achieved with keeping excellent strong scaling up to 8,000 KNLs on the OakforestPACS.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A communication avoiding (CA) multigrid preconditioned conjugate gradient method (CAMGCG) is applied to the pressure Poisson equation in a multiphase CFD code JUPITER, and its computational performance and convergence property are compared against CA Krylov methods. A new geometric multigrid preconditioner is developed using a preconditioned Chebyshev iteration smoother, in which no global reduction communication is needed, halo data communication is reduced by a mixed precision approach, and eigenvalues are computed using the CA Lanczos method. In the JUPITER code, the CAMGCG solver has robust convergence properties regardless of the problem size, and shows both communication reduction and convergence improvement, leading to higher performance gain than CA Krylov solvers, which achieve only the former. The CAMGCG solver is applied to extreme scale multiphase CFD simulations with ~ 90 billion DOFs, and it is shown that compared with a preconditioned CG solver, the number of iterations, and thus, All_Reduce is reduced to ~ 1/800, and ~ 11.6× speedup is achieved with keeping excellent strong scaling up to 8,000 KNLs on the OakforestPACS.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A communication avoiding (CA) multigrid preconditioned conjugate gradient method (CAMGCG) is applied to the pressure Poisson equation in a multiphase CFD code JUPITER, and its computational performance and convergence property are compared against CA Krylov methods. A new geometric multigrid preconditioner is developed using a preconditioned Chebyshev iteration smoother, in which no global reduction communication is needed, halo data communication is reduced by a mixed precision approach, and eigenvalues are computed using the CA Lanczos method. In the JUPITER code, the CAMGCG solver has robust convergence properties regardless of the problem size, and shows both communication reduction and convergence improvement, leading to higher performance gain than CA Krylov solvers, which achieve only the former. The CAMGCG solver is applied to extreme scale multiphase CFD simulations with ~ 90 billion DOFs, and it is shown that compared with a preconditioned CG solver, the number of iterations, and thus, All_Reduce is reduced to ~ 1/800, and ~ 11.6× speedup is achieved with keeping excellent strong scaling up to 8,000 KNLs on the OakforestPACS.",
"fno": "017600a017",
"keywords": [
"Chebyshev Approximation",
"Computational Fluid Dynamics",
"Conjugate Gradient Methods",
"Convergence Of Numerical Methods",
"Eigenvalues And Eigenfunctions",
"Iterative Methods",
"Mathematics Computing",
"Multiphase Flow",
"Poisson Equation",
"CA Krylov Solvers",
"CAMGCG Solver",
"Extreme Scale Multiphase CFD Simulations",
"Preconditioned CG Solver",
"Communication Avoiding Multigrid Preconditioned Conjugate Gradient Method",
"Pressure Poisson Equation",
"Computational Performance",
"Convergence Property",
"CA Krylov Methods",
"Geometric Multigrid Preconditioner",
"Preconditioned Chebyshev Iteration Smoother",
"Halo Data Communication",
"CA Lanczos Method",
"JUPITER Code",
"Communication Reduction",
"Convergence Improvement",
"Multiphase CFD Code",
"Mixed Precision Approach",
"Eigenvalues",
"Oakforest PACS",
"Convergence",
"Jupiter",
"Computational Fluid Dynamics",
"Poisson Equations",
"Chebyshev Approximation",
"Computational Modeling",
"Iterative Methods",
"Multigrid Method Krylov Method Communication Avoiding Chebyshev Iteration CFD Simulation"
],
"authors": [
{
"affiliation": null,
"fullName": "Yasuhiro Idomura",
"givenName": "Yasuhiro",
"surname": "Idomura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Takuya Ina",
"givenName": "Takuya",
"surname": "Ina",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Susumu Yamashita",
"givenName": "Susumu",
"surname": "Yamashita",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Naoyuki Onodera",
"givenName": "Naoyuki",
"surname": "Onodera",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Susumu Yamada",
"givenName": "Susumu",
"surname": "Yamada",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Toshiyuki Imamura",
"givenName": "Toshiyuki",
"surname": "Imamura",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "scala",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-11-01T00:00:00",
"pubType": "proceedings",
"pages": "17-24",
"year": "2018",
"issn": null,
"isbn": "978-1-7281-0176-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "017600a009",
"articleId": "18jXK7rnDyg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "017600a025",
"articleId": "18rqyYUD8U8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sc/1994/6605/0/00344279",
"title": "Efficient implementation of the multigrid preconditioned conjugate gradient method on distributed memory machines",
"doi": null,
"abstractUrl": "/proceedings-article/sc/1994/00344279/12OmNqHqSnY",
"parentPublication": {
"id": "proceedings/sc/1994/6605/0",
"title": "SC Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scala/2016/5222/0/07836592",
"title": "Left-Preconditioned Communication-Avoiding Conjugate Gradient Methods for Multiphase CFD Simulations on the K Computer",
"doi": null,
"abstractUrl": "/proceedings-article/scala/2016/07836592/12OmNxwncnt",
"parentPublication": {
"id": "proceedings/scala/2016/5222/0",
"title": "2016 7th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (ScalA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dfts/2015/8606/0/07315136",
"title": "Low-overhead fault-tolerance for the preconditioned conjugate gradient solver",
"doi": null,
"abstractUrl": "/proceedings-article/dfts/2015/07315136/12OmNxzMnVz",
"parentPublication": {
"id": "proceedings/dfts/2015/8606/0",
"title": "2015 IEEE International Symposium on Defect and Fault Tolerance in VLSI and Nanotechnology Systems (DFTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hipcw/2016/5773/0/07837053",
"title": "An Efficient GPU Parallelization for Arbitrary Collocated Polyhedral Finite Volume Grids and Its Application to Incompressible Fluid Flows",
"doi": null,
"abstractUrl": "/proceedings-article/hipcw/2016/07837053/12OmNyGbIlm",
"parentPublication": {
"id": "proceedings/hipcw/2016/5773/0",
"title": "2016 IEEE 23rd International Conference on High-Performance Computing: Workshops (HiPCW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispan/1994/6507/0/00367184",
"title": "Cenju-3 parallel computer and its application to CFD",
"doi": null,
"abstractUrl": "/proceedings-article/ispan/1994/00367184/12OmNzlUKpQ",
"parentPublication": {
"id": "proceedings/ispan/1994/6507/0",
"title": "Proceedings of the International Symposium on Parallel Architectures, Algorithms and Networks (ISPAN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-smartcity-dss/2017/2588/0/08291907",
"title": "Comparative Performance Modeling of Parallel Preconditioned Krylov Methods",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2017/08291907/17D45WZZ7B6",
"parentPublication": {
"id": "proceedings/hpcc-smartcity-dss/2017/2588/0",
"title": "2017 IEEE 19th International Conference on High Performance Computing and Communications; IEEE 15th International Conference on Smart City; IEEE 3rd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ftxs/2018/0222/0/022200a049",
"title": "Extending and Evaluating Fault-Tolerant Preconditioned Conjugate Gradient Methods",
"doi": null,
"abstractUrl": "/proceedings-article/ftxs/2018/022200a049/17D45Xi9rWg",
"parentPublication": {
"id": "proceedings/ftxs/2018/0222/0",
"title": "2018 IEEE/ACM 8th Workshop on Fault Tolerance for HPC at eXtreme Scale (FTXS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ftxs/2019/6013/0/601300a031",
"title": "Node-Failure-Resistant Preconditioned Conjugate Gradient Method without Replacement Nodes",
"doi": null,
"abstractUrl": "/proceedings-article/ftxs/2019/601300a031/1gjSaoLql4k",
"parentPublication": {
"id": "proceedings/ftxs/2019/6013/0",
"title": "2019 IEEE/ACM 9th Workshop on Fault Tolerance for HPC at eXtreme Scale (FTXS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scala/2019/5989/0/598900a001",
"title": "GPU Acceleration of Communication Avoiding Chebyshev Basis Conjugate Gradient Solver for Multiphase CFD Simulations",
"doi": null,
"abstractUrl": "/proceedings-article/scala/2019/598900a001/1grNUkoYFbi",
"parentPublication": {
"id": "proceedings/scala/2019/5989/0",
"title": "2019 IEEE/ACM 10th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (ScalA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scala/2021/1128/0/112800a001",
"title": "Iterative methods with mixed-precision preconditioning for ill-conditioned linear systems in multiphase CFD simulations",
"doi": null,
"abstractUrl": "/proceedings-article/scala/2021/112800a001/1zHJ3yi7ijS",
"parentPublication": {
"id": "proceedings/scala/2021/1128/0",
"title": "2021 12th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (ScalA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1HOxvuXQgWA",
"title": "SC17: International Conference for High Performance Computing, Networking, Storage and Analysis",
"acronym": "sc",
"groupId": "1000729",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "1HOxCiJdaE0",
"doi": null,
"title": "Optimizing Geometric Multigrid Method Computation using a DSL Approach",
"normalizedTitle": "Optimizing Geometric Multigrid Method Computation using a DSL Approach",
"abstract": "The Geometric Multigrid (GMG) method is widely used in numerical analysis to accelerate the convergence of partial differential equations solvers using a hierarchy of grid discretizations. Multiple grid sizes and recursive expression of multigrid cycles make the task of program optimization tedious. A high-level language that aids domain experts for GMG with effective optimization and parallelization support is thus valuable. We demonstrate how high performance can be achieved along with enhanced programmability for GMG, with new language/optimization support in the PolyMage DSL framework. We compare our approach with (a) hand-optimized code, (b) hand-optimized code in conjunction with polyhedral optimization techniques, and (c) the existing PolyMage optimizer adapted to multigrid. We use benchmarks varying in multigrid cycle structure and smoothing steps for evaluation. On a 24-core Intel Xeon Haswell multicore system, our automatically optimized codes achieve a mean improvement of 3. 2x over straightforward parallelization, and 1. 31x over the PolyMage optimizer.CCS CONCEPTS• Software and its engineering →Compilers;",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Geometric Multigrid (GMG) method is widely used in numerical analysis to accelerate the convergence of partial differential equations solvers using a hierarchy of grid discretizations. Multiple grid sizes and recursive expression of multigrid cycles make the task of program optimization tedious. A high-level language that aids domain experts for GMG with effective optimization and parallelization support is thus valuable. We demonstrate how high performance can be achieved along with enhanced programmability for GMG, with new language/optimization support in the PolyMage DSL framework. We compare our approach with (a) hand-optimized code, (b) hand-optimized code in conjunction with polyhedral optimization techniques, and (c) the existing PolyMage optimizer adapted to multigrid. We use benchmarks varying in multigrid cycle structure and smoothing steps for evaluation. On a 24-core Intel Xeon Haswell multicore system, our automatically optimized codes achieve a mean improvement of 3. 2x over straightforward parallelization, and 1. 31x over the PolyMage optimizer.CCS CONCEPTS• Software and its engineering →Compilers;",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Geometric Multigrid (GMG) method is widely used in numerical analysis to accelerate the convergence of partial differential equations solvers using a hierarchy of grid discretizations. Multiple grid sizes and recursive expression of multigrid cycles make the task of program optimization tedious. A high-level language that aids domain experts for GMG with effective optimization and parallelization support is thus valuable. We demonstrate how high performance can be achieved along with enhanced programmability for GMG, with new language/optimization support in the PolyMage DSL framework. We compare our approach with (a) hand-optimized code, (b) hand-optimized code in conjunction with polyhedral optimization techniques, and (c) the existing PolyMage optimizer adapted to multigrid. We use benchmarks varying in multigrid cycle structure and smoothing steps for evaluation. On a 24-core Intel Xeon Haswell multicore system, our automatically optimized codes achieve a mean improvement of 3. 2x over straightforward parallelization, and 1. 31x over the PolyMage optimizer.CCS CONCEPTS• Software and its engineering →Compilers;",
"fno": "09926262",
"keywords": [
"Differential Equations",
"Multiprocessing Systems",
"Numerical Analysis",
"Optimisation",
"Partial Differential Equations",
"Program Compilers",
"Polyhedral Optimization Techniques",
"Hand Optimized Code",
"Poly Mage DSL Framework",
"Parallelization Support",
"Effective Optimization",
"Domain Experts",
"High Level Language",
"Program Optimization",
"Multigrid Cycles",
"Recursive Expression",
"Multiple Grid Sizes",
"Grid Discretizations",
"Partial Differential Equations Solvers",
"Numerical Analysis",
"GMG",
"DSL Approach",
"Geometric Multigrid Method Computation",
"Automatically Optimized Codes",
"24 Core Intel Xeon Haswell Multicore System",
"Smoothing Steps",
"Multigrid Cycle Structure",
"Existing Poly Mage Optimizer",
"Codes",
"Smoothing Methods",
"Partial Differential Equations",
"Multigrid Methods",
"Benchmark Testing",
"Software",
"DSL",
"Multigrid",
"Tiling",
"Parallelization"
],
"authors": [
{
"affiliation": "Indian Institute of Science,Dept of CSA,Bangalore,India,560012",
"fullName": "Vinay Vasista",
"givenName": "Vinay",
"surname": "Vasista",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Institute of Science,Dept of CSA,Bangalore,India,560012",
"fullName": "Kumudha Narasimhan",
"givenName": "Kumudha",
"surname": "Narasimhan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "International Institute of Information Technology,Hyderabad,India,500032",
"fullName": "Siddharth Bhat",
"givenName": "Siddharth",
"surname": "Bhat",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Institute of Science,Dept of CSA,Bangalore,India,560012",
"fullName": "Uday Bondhugula",
"givenName": "Uday",
"surname": "Bondhugula",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1-13",
"year": "2017",
"issn": null,
"isbn": "978-1-4503-5114-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09926281",
"articleId": "1HOxxJT4rSg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09926278",
"articleId": "1HOxA5a0gdq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ipdps/2003/1926/0/19260058a",
"title": "Vectorization of Multigrid Codes Using SIMD ISA Extensions",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2003/19260058a/12OmNqIQS8A",
"parentPublication": {
"id": "proceedings/ipdps/2003/1926/0",
"title": "Parallel and Distributed Processing Symposium, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccms/2010/3941/4/3941d242",
"title": "A Cascadic Multigrid Algorithm for the Double Obstacle Problem",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/3941d242/12OmNviHKgf",
"parentPublication": {
"id": "proceedings/iccms/2010/3941/4",
"title": "Computer Modeling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2012/0806/0/1000a045",
"title": "Parallel geometric-algebraic multigrid on unstructured forests of octrees",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2012/1000a045/12OmNy7Qfuf",
"parentPublication": {
"id": "proceedings/sc/2012/0806/0",
"title": "SC Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097787",
"title": "Optimization of serial and parallel communications for parallel geometric multigrid method",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097787/12OmNyQ7FSs",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/shpcc/1992/2775/0/00232666",
"title": "Incremental mapping for solution-adaptive multigrid hierarchies",
"doi": null,
"abstractUrl": "/proceedings-article/shpcc/1992/00232666/12OmNyoiZ5O",
"parentPublication": {
"id": "proceedings/shpcc/1992/2775/0",
"title": "1992 Proceedings Scalable High Performance Computing Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2006/06/c6024",
"title": "An Introduction to Algebraic Multigrid",
"doi": null,
"abstractUrl": "/magazine/cs/2006/06/c6024/13rRUwkxc1e",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2006/06/c6010",
"title": "Guest Editors' Introduction: Multigrid Computing",
"doi": null,
"abstractUrl": "/magazine/cs/2006/06/c6010/13rRUxYIMQn",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2006/06/c6012",
"title": "Why Multigrid Methods Are So Efficient",
"doi": null,
"abstractUrl": "/magazine/cs/2006/06/c6012/13rRUy2YLOO",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2006/05/c5096",
"title": "Multigrid Methods: Managing Massive Meshes",
"doi": null,
"abstractUrl": "/magazine/cs/2006/05/c5096/13rRUygBw9Z",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2019/1246/0/124600a101",
"title": "Asynchronous Multigrid Methods",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2019/124600a101/1cYhNxfoynm",
"parentPublication": {
"id": "proceedings/ipdps/2019/1246/0",
"title": "2019 IEEE International Parallel and Distributed Processing Symposium (IPDPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1HzBwPYPPJ6",
"title": "SC21: International Conference for High Performance Computing, Networking, Storage and Analysis",
"acronym": "sc",
"groupId": "1000729",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1HzBIgRKoOA",
"doi": "10.1145/3458817.3476218",
"title": "Distributed Multigrid Neural Solvers on Megavoxel Domains",
"normalizedTitle": "Distributed Multigrid Neural Solvers on Megavoxel Domains",
"abstract": "We consider the distributed training of large scale neural networks that serve as PDE (partial differential equation) solvers producing full field outputs. We specifically consider neural solvers for the generalized 3D Poisson equation over megavoxel domains. A scalable framework is presented that integrates two distinct advances. First, we accelerate training a large model via a method analogous to the multigrid technique used in numerical linear algebra. Here, the network is trained using a hierarchy of increasing resolution inputs in sequence, analogous to the ‘V’, ‘W’, ‘F’ and ‘Half-V’ cycles used in multigrid approaches. In conjunction with the multi-grid approach, we implement a distributed deep learning framework which significantly reduces the time to solve. We show scalability of this approach on both GPU (Azure VMs on Cloud) and CPU clusters (PSC Bridges2). This approach is deployed to train a generalized 3D Poisson solver that scales well to predict output full field solutions up to the resolution of 512 × 512 × 512 for a high dimensional family of inputs. This strategy opens up the possibility of fast and scalable training of neural PDE solvers on heterogeneous clusters.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We consider the distributed training of large scale neural networks that serve as PDE (partial differential equation) solvers producing full field outputs. We specifically consider neural solvers for the generalized 3D Poisson equation over megavoxel domains. A scalable framework is presented that integrates two distinct advances. First, we accelerate training a large model via a method analogous to the multigrid technique used in numerical linear algebra. Here, the network is trained using a hierarchy of increasing resolution inputs in sequence, analogous to the ‘V’, ‘W’, ‘F’ and ‘Half-V’ cycles used in multigrid approaches. In conjunction with the multi-grid approach, we implement a distributed deep learning framework which significantly reduces the time to solve. We show scalability of this approach on both GPU (Azure VMs on Cloud) and CPU clusters (PSC Bridges2). This approach is deployed to train a generalized 3D Poisson solver that scales well to predict output full field solutions up to the resolution of 512 × 512 × 512 for a high dimensional family of inputs. This strategy opens up the possibility of fast and scalable training of neural PDE solvers on heterogeneous clusters.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We consider the distributed training of large scale neural networks that serve as PDE (partial differential equation) solvers producing full field outputs. We specifically consider neural solvers for the generalized 3D Poisson equation over megavoxel domains. A scalable framework is presented that integrates two distinct advances. First, we accelerate training a large model via a method analogous to the multigrid technique used in numerical linear algebra. Here, the network is trained using a hierarchy of increasing resolution inputs in sequence, analogous to the ‘V’, ‘W’, ‘F’ and ‘Half-V’ cycles used in multigrid approaches. In conjunction with the multi-grid approach, we implement a distributed deep learning framework which significantly reduces the time to solve. We show scalability of this approach on both GPU (Azure VMs on Cloud) and CPU clusters (PSC Bridges2). This approach is deployed to train a generalized 3D Poisson solver that scales well to predict output full field solutions up to the resolution of 512 × 512 × 512 for a high dimensional family of inputs. This strategy opens up the possibility of fast and scalable training of neural PDE solvers on heterogeneous clusters.",
"fno": "09910116",
"keywords": [
"Training",
"Three Dimensional Displays",
"Poisson Equations",
"Scalability",
"High Performance Computing",
"Neural Networks",
"Linear Algebra",
"Physics Aware Neural Networks",
"Distributed Training",
"Multigrid",
"Neural PDE Solvers"
],
"authors": [
{
"affiliation": "Iowa State University Ames,Iowa,USA",
"fullName": "Aditya Balu",
"givenName": "Aditya",
"surname": "Balu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Vinay Rao Rocket ML Inc. Portland,Oregon,USA",
"fullName": "Sergio Botelho",
"givenName": "Sergio",
"surname": "Botelho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University Ames,Iowa,USA",
"fullName": "Biswajit Khara",
"givenName": "Biswajit",
"surname": "Khara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Vinay Rao Rocket ML Inc. Portland,Oregon,USA",
"fullName": "Vinay Rao",
"givenName": "Vinay",
"surname": "Rao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University Ames,Iowa,USA",
"fullName": "Soumik Sarkar",
"givenName": "Soumik",
"surname": "Sarkar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "New York University,New York City,New York,USA",
"fullName": "Chinmay Hegde",
"givenName": "Chinmay",
"surname": "Hegde",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University Ames,Iowa,USA",
"fullName": "Adarsh Krishnamurthy",
"givenName": "Adarsh",
"surname": "Krishnamurthy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Vinay Rao Rocket ML Inc. Portland,Oregon,USA",
"fullName": "Santi Adavani",
"givenName": "Santi",
"surname": "Adavani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University Ames,Iowa,USA",
"fullName": "Baskar Ganapathysubramanian",
"givenName": "Baskar",
"surname": "Ganapathysubramanian",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1-15",
"year": "2021",
"issn": null,
"isbn": "978-1-4503-8442-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09910062",
"articleId": "1HzBCdEAPHa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09910059",
"articleId": "1HzBDExSxbO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ipdpsw/2013/4979/0/4979b675",
"title": "Systematic Reduction of Data Movement in Algebraic Multigrid Solvers",
"doi": null,
"abstractUrl": "/proceedings-article/ipdpsw/2013/4979b675/12OmNwudQNR",
"parentPublication": {
"id": "proceedings/ipdpsw/2013/4979/0",
"title": "2013 IEEE International Symposium on Parallel & Distributed Processing, Workshops and Phd Forum (IPDPSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/superc/2003/695/0/01592929",
"title": "Applications of Algebraic Multigrid to Large-Scale Finite Element Analysis of Whole Bone Micro-Mechanics on the IBM SP",
"doi": null,
"abstractUrl": "/proceedings-article/superc/2003/01592929/12OmNxFaLFF",
"parentPublication": {
"id": "proceedings/superc/2003/695/0",
"title": "ACM/IEEE SC 2003 Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2014/3799/0/06877343",
"title": "s-Step Krylov Subspace Methods as Bottom Solvers for Geometric Multigrid",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2014/06877343/12OmNzQzqfm",
"parentPublication": {
"id": "proceedings/ipdps/2014/3799/0",
"title": "2014 IEEE International Parallel & Distributed Processing Symposium (IPDPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2011/01/ttd2011010022",
"title": "Cyclic Reduction Tridiagonal Solvers on GPUs Applied to Mixed-Precision Multigrid",
"doi": null,
"abstractUrl": "/journal/td/2011/01/ttd2011010022/13rRUwd9CFF",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07364293",
"title": "Solving the Fluid Pressure Poisson Equation Using Multigrid—Evaluation and Improvements",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07364293/13rRUwvBy8Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/06171181",
"title": "A Multigrid Fluid Pressure Solver Handling Separating Solid Boundary Conditions",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/06171181/13rRUxlgxTi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scala/2018/0176/0/017600a017",
"title": "Communication Avoiding Multigrid Preconditioned Conjugate Gradient Method for Extreme Scale Multiphase CFD Simulations",
"doi": null,
"abstractUrl": "/proceedings-article/scala/2018/017600a017/18jXKefKL6g",
"parentPublication": {
"id": "proceedings/scala/2018/0176/0",
"title": "2018 IEEE/ACM 9th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (scalA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/1999/1966/0/01592670",
"title": "Parallel Multigrid Solver for 3D Unstructured Finite Element Problems",
"doi": null,
"abstractUrl": "/proceedings-article/sc/1999/01592670/1D85NPKIl7q",
"parentPublication": {
"id": "proceedings/sc/1999/1966/0",
"title": "SC Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2021/8442/0/09910144",
"title": "Scalable Adaptive PDE Solvers in Arbitrary Domains",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2021/09910144/1HzBMq3b2EM",
"parentPublication": {
"id": "proceedings/sc/2021/8442/0",
"title": "SC21: International Conference for High Performance Computing, Networking, Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2003/2113/0/01592929",
"title": "Applications of Algebraic Multigrid to Large-Scale Finite Element Analysis of Whole Bone Micro-Mechanics on the IBM SP",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2003/01592929/1MEX0C7qyFG",
"parentPublication": {
"id": "proceedings/sc/2003/2113/0",
"title": "SC Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNApcua3",
"title": "Symposium on Interactive Ray Tracing",
"acronym": "rt",
"groupId": "1001330",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqAU6Br",
"doi": "10.1109/RT.2008.4634643",
"title": "Ray tracing NPR-style feature lines",
"normalizedTitle": "Ray tracing NPR-style feature lines",
"abstract": "Though the goal of ray tracing and other physically based rendering techniques is ultimately to produce photorealistic images, it is often helpful to use non-photorealistic rendering techniques to illustrate or highlight certain features in a rendering. We present a method for ray tracing constant screen-width NPR-style feature lines on top of regularly rendered scenes, demonstrating how a variant of line rasterization can be included in a ray tracer, thus allowing for the inclusion of NPR-style enhancements. We are able to render silhouette edges, marking the boundary of an object in screen space against the background (or against farther parts of the same object), intersection lines, marking the curves along which two primitives intersect, and crease edges, indicating curves along which a primitive's normal field is discontinuous. Including these lines gives the viewer an additional cue to relative positions of objects within the scene, and also enhances particular features within objects, such as sharp corners. The method in this paper was developed in particular for enhancing glyph-based scientific visualization; however, the basic technique can be adapted for many illustrative purposes in different settings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Though the goal of ray tracing and other physically based rendering techniques is ultimately to produce photorealistic images, it is often helpful to use non-photorealistic rendering techniques to illustrate or highlight certain features in a rendering. We present a method for ray tracing constant screen-width NPR-style feature lines on top of regularly rendered scenes, demonstrating how a variant of line rasterization can be included in a ray tracer, thus allowing for the inclusion of NPR-style enhancements. We are able to render silhouette edges, marking the boundary of an object in screen space against the background (or against farther parts of the same object), intersection lines, marking the curves along which two primitives intersect, and crease edges, indicating curves along which a primitive's normal field is discontinuous. Including these lines gives the viewer an additional cue to relative positions of objects within the scene, and also enhances particular features within objects, such as sharp corners. The method in this paper was developed in particular for enhancing glyph-based scientific visualization; however, the basic technique can be adapted for many illustrative purposes in different settings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Though the goal of ray tracing and other physically based rendering techniques is ultimately to produce photorealistic images, it is often helpful to use non-photorealistic rendering techniques to illustrate or highlight certain features in a rendering. We present a method for ray tracing constant screen-width NPR-style feature lines on top of regularly rendered scenes, demonstrating how a variant of line rasterization can be included in a ray tracer, thus allowing for the inclusion of NPR-style enhancements. We are able to render silhouette edges, marking the boundary of an object in screen space against the background (or against farther parts of the same object), intersection lines, marking the curves along which two primitives intersect, and crease edges, indicating curves along which a primitive's normal field is discontinuous. Including these lines gives the viewer an additional cue to relative positions of objects within the scene, and also enhances particular features within objects, such as sharp corners. The method in this paper was developed in particular for enhancing glyph-based scientific visualization; however, the basic technique can be adapted for many illustrative purposes in different settings.",
"fno": "04634643",
"keywords": [
"Computational Geometry",
"Curve Fitting",
"Data Visualisation",
"Edge Detection",
"Feature Extraction",
"Image Enhancement",
"Object Detection",
"Ray Tracing",
"Rendering Computer Graphics",
"Ray Tracing Constant Screen Width",
"NPR Style Feature Line",
"Nonphotorealistic Rendering Technique",
"Photorealistic Image",
"Silhouette Edge Rendering",
"Object Boundary Marking",
"Intersection Line",
"Curve Marking",
"Glyph Based Scientific Visualization Enhancement",
"Image Generation",
"Ray Tracing",
"Geometry",
"Color",
"Three Dimensional Displays",
"Visualization",
"Image Edge Detection"
],
"authors": [
{
"affiliation": "SCI Institute, University of Utah, USA",
"fullName": "A.N.M. Imroz Choudhury",
"givenName": "A.N.M. Imroz",
"surname": "Choudhury",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "SCI Institute, University of Utah, USA",
"fullName": "Steven G. Parker",
"givenName": "Steven G.",
"surname": "Parker",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "rt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-08-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04634642",
"articleId": "12OmNvjyxSY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04634644",
"articleId": "12OmNqJ8ttG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/rt/2006/0693/0/04061539",
"title": "Ray Tracing for the Movie `Cars'",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2006/04061539/12OmNBBzoiL",
"parentPublication": {
"id": "proceedings/rt/2006/0693/0",
"title": "IEEE Symposium on Interactive Ray Tracing 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pvg/2003/2091/0/20910012",
"title": "Distributed Interactive Ray Tracing for Large Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pvg/2003/20910012/12OmNBsue7j",
"parentPublication": {
"id": "proceedings/pvg/2003/2091/0",
"title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2008/2741/0/04634650",
"title": "An FPGA implementation of whitted-style ray tracing accelerator",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2008/04634650/12OmNCgJecj",
"parentPublication": {
"id": "proceedings/rt/2008/2741/0",
"title": "Symposium on Interactive Ray Tracing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smi/2003/1845/0/18450272",
"title": "Ray Tracing Point Set Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2003/18450272/12OmNqGRG73",
"parentPublication": {
"id": "proceedings/smi/2003/1845/0",
"title": "Shape Modeling and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2003/1946/0/19460202",
"title": "Ray Tracing Height Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2003/19460202/12OmNvrdI3r",
"parentPublication": {
"id": "proceedings/cgi/2003/1946/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pbg/2005/20/0/01500312",
"title": "Interactive ray tracing of point-based models",
"doi": null,
"abstractUrl": "/proceedings-article/pbg/2005/01500312/12OmNxaeu1E",
"parentPublication": {
"id": "proceedings/pbg/2005/20/0",
"title": "Point-Based Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2008/2741/0/04634622",
"title": "Coherent ray tracing via stream filtering",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2008/04634622/12OmNxxdZDN",
"parentPublication": {
"id": "proceedings/rt/2008/2741/0",
"title": "Symposium on Interactive Ray Tracing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08115176",
"title": "Time Interval Ray Tracing for Motion Blur",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08115176/14H4WMfTBId",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2018/6873/0/08739241",
"title": "Galaxy: Asynchronous Ray Tracing for Large High-Fidelity Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2018/08739241/1b1xcjia3Be",
"parentPublication": {
"id": "proceedings/ldav/2018/6873/0",
"title": "2018 IEEE 8th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a391",
"title": "ARRay-Tracing - A Middleware to Provide Ray Tracing Capabilities to Augmented Reality Libraries",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a391/1oZBBWaRzNu",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqyUUIX",
"title": "Shape Modeling and Applications, International Conference on",
"acronym": "smi",
"groupId": "1000664",
"volume": "0",
"displayVolume": "0",
"year": "2003",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqGRG73",
"doi": "10.1109/SMI.2003.1199627",
"title": "Ray Tracing Point Set Surfaces",
"normalizedTitle": "Ray Tracing Point Set Surfaces",
"abstract": "Point set surfaces (PSS) are a smooth manifold surface approximation from a set of sample points. The surface definition is based on a projection operation that constructs local polynomial approximations and respects a minimum feature size. We present techniques for ray tracing PSSs. For the computation of ray-surface intersection the properties of the projection operation are exploited: The surface is enclosed by a union of minimum feature size spheres. A ray is intersected with the spheres first and inside the spheres with local polynomial approximations. Our results show that 2-3 projections are sufficient to accurately intersect a ray with the surface.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Point set surfaces (PSS) are a smooth manifold surface approximation from a set of sample points. The surface definition is based on a projection operation that constructs local polynomial approximations and respects a minimum feature size. We present techniques for ray tracing PSSs. For the computation of ray-surface intersection the properties of the projection operation are exploited: The surface is enclosed by a union of minimum feature size spheres. A ray is intersected with the spheres first and inside the spheres with local polynomial approximations. Our results show that 2-3 projections are sufficient to accurately intersect a ray with the surface.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Point set surfaces (PSS) are a smooth manifold surface approximation from a set of sample points. The surface definition is based on a projection operation that constructs local polynomial approximations and respects a minimum feature size. We present techniques for ray tracing PSSs. For the computation of ray-surface intersection the properties of the projection operation are exploited: The surface is enclosed by a union of minimum feature size spheres. A ray is intersected with the spheres first and inside the spheres with local polynomial approximations. Our results show that 2-3 projections are sufficient to accurately intersect a ray with the surface.",
"fno": "18450272",
"keywords": [
"Ray Tracing",
"Point Sampled Geometry",
"Point Set Surfaces"
],
"authors": [
{
"affiliation": "TU Darmstadt, GRIS",
"fullName": "Anders Adamson",
"givenName": "Anders",
"surname": "Adamson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Darmstadt, GRIS",
"fullName": "Marc Alexa",
"givenName": "Marc",
"surname": "Alexa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "smi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2003-05-01T00:00:00",
"pubType": "proceedings",
"pages": "272",
"year": "2003",
"issn": null,
"isbn": "0-7695-1909-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "18450264",
"articleId": "12OmNz5apEz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "18450283",
"articleId": "12OmNC2OSJ7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgra/2006/2686/0/04027048",
"title": "Curvature-driven modeling and rendering of point-based surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/sibgra/2006/04027048/12OmNCcKQsH",
"parentPublication": {
"id": "proceedings/sibgra/2006/2686/0",
"title": "2006 19th Brazilian Symposium on Computer Graphics and Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2006/0693/0/04061558",
"title": "Direct and Fast Ray Tracing of NURBS Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2006/04061558/12OmNrkT7sg",
"parentPublication": {
"id": "proceedings/rt/2006/0693/0",
"title": "IEEE Symposium on Interactive Ray Tracing 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2003/1946/0/19460202",
"title": "Ray Tracing Height Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2003/19460202/12OmNvrdI3r",
"parentPublication": {
"id": "proceedings/cgi/2003/1946/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pbg/2005/20/0/01500312",
"title": "Interactive ray tracing of point-based models",
"doi": null,
"abstractUrl": "/proceedings-article/pbg/2005/01500312/12OmNxaeu1E",
"parentPublication": {
"id": "proceedings/pbg/2005/20/0",
"title": "Point-Based Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2001/1007/0/10070215",
"title": "Ray Tracing Surfaces of Revolution: An Old Problem with A New Perspective",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2001/10070215/12OmNxbEtFk",
"parentPublication": {
"id": "proceedings/cgi/2001/1007/0",
"title": "Proceedings. Computer Graphics International 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2007/1629/0/04342599",
"title": "Towards Hardware Ray Tracing using Fixed Point Arithmetic",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2007/04342599/12OmNy5hRnZ",
"parentPublication": {
"id": "proceedings/rt/2007/1629/0",
"title": "IEEE/ EG Symposium on Interactive Ray Tracing 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacific-graphics/2010/4205/0/4205a101",
"title": "GPU-Based Ray Tracing of Splats",
"doi": null,
"abstractUrl": "/proceedings-article/pacific-graphics/2010/4205a101/12OmNzwHvuo",
"parentPublication": {
"id": "proceedings/pacific-graphics/2010/4205/0",
"title": "Pacific Conference on Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/02/ttg2010020261",
"title": "Real-Time Ray Tracing of Implicit Surfaces on the GPU",
"doi": null,
"abstractUrl": "/journal/tg/2010/02/ttg2010020261/13rRUwI5TQT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/03/v0202",
"title": "Ray-Tracing Triangular Trimmed Free-Form Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/1998/03/v0202/13rRUwhpBDW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/03/v0238",
"title": "Interactive Ray Tracing for Volume Visualization",
"doi": null,
"abstractUrl": "/journal/tg/1999/03/v0238/13rRUxOdD85",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNB836KO",
"title": "2005 IEEE International Conference on Multimedia and Expo",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvnwVmC",
"doi": "10.1109/ICME.2005.1521622",
"title": "The sound wave ray-space",
"normalizedTitle": "The sound wave ray-space",
"abstract": "This paper addresses the problem of 3D sound representation without sound source localization and proposes a theory based on the ray-space representation of light rays, which is independent of object's specifications. An array of beam-formed microphone-arrays (MAs), are set and each MA generates a sound-image (SImage) by scanning the viewing range of a camera in the same location. SImage has the same size of an image and contains of blocks of sound wave with duration of one image-frame. Captured SImages with the array of MAs generate the sound wave ray-space. To make a dense SImage ray-space, we propose to use the geometry compensation of corresponding images in the location of each MA. By a dense sound ray-space, any virtual SImage, which corresponds to an arbitrary listening-point, can be generated. The listening-point sound is generated by averaging the sound wave in each pixel or group of pixel of the virtual SImage.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper addresses the problem of 3D sound representation without sound source localization and proposes a theory based on the ray-space representation of light rays, which is independent of object's specifications. An array of beam-formed microphone-arrays (MAs), are set and each MA generates a sound-image (SImage) by scanning the viewing range of a camera in the same location. SImage has the same size of an image and contains of blocks of sound wave with duration of one image-frame. Captured SImages with the array of MAs generate the sound wave ray-space. To make a dense SImage ray-space, we propose to use the geometry compensation of corresponding images in the location of each MA. By a dense sound ray-space, any virtual SImage, which corresponds to an arbitrary listening-point, can be generated. The listening-point sound is generated by averaging the sound wave in each pixel or group of pixel of the virtual SImage.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper addresses the problem of 3D sound representation without sound source localization and proposes a theory based on the ray-space representation of light rays, which is independent of object's specifications. An array of beam-formed microphone-arrays (MAs), are set and each MA generates a sound-image (SImage) by scanning the viewing range of a camera in the same location. SImage has the same size of an image and contains of blocks of sound wave with duration of one image-frame. Captured SImages with the array of MAs generate the sound wave ray-space. To make a dense SImage ray-space, we propose to use the geometry compensation of corresponding images in the location of each MA. By a dense sound ray-space, any virtual SImage, which corresponds to an arbitrary listening-point, can be generated. The listening-point sound is generated by averaging the sound wave in each pixel or group of pixel of the virtual SImage.",
"fno": "01521622",
"keywords": [
"Listening Point Sound",
"3 D Sound Representation",
"Ray Space Representation",
"Light Ray",
"Beam Formed Microphone Arrays",
"Sound Image Generation",
"Virtual S Image",
"Camera",
"Geometry Compensation"
],
"authors": [
{
"affiliation": "Inf. Technol. Center, Nagoya Univ., Japan",
"fullName": "M.P. Tehrani",
"givenName": "M.P.",
"surname": "Tehrani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inf. Technol. Center, Nagoya Univ., Japan",
"fullName": "Y. Hirano",
"givenName": "Y.",
"surname": "Hirano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "T. Fujii",
"givenName": "T.",
"surname": "Fujii",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "S. Kajita",
"givenName": "S.",
"surname": "Kajita",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "K. Takeda",
"givenName": "K.",
"surname": "Takeda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "M. Tanimoto",
"givenName": "M.",
"surname": "Tanimoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "K. Mase",
"givenName": "K.",
"surname": "Mase",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-07-01T00:00:00",
"pubType": "proceedings",
"pages": "4 pp.",
"year": "2005",
"issn": null,
"isbn": "0-7803-9331-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01521621",
"articleId": "12OmNxFaLmP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01521623",
"articleId": "12OmNzTH0H1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icgec/2011/4449/0/4449a361",
"title": "Evacuation Model Based on Ellipse Sound Wave Diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/icgec/2011/4449a361/12OmNBPtJFa",
"parentPublication": {
"id": "proceedings/icgec/2011/4449/0",
"title": "Genetic and Evolutionary Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/2/02531020",
"title": "Two Types of Sound Tool for Editing Speech Signal: Sound Cutter and Symbolic Sound Editor",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02531020/12OmNBtl1GU",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/2",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2011/4296/3/4296e582",
"title": "Study on the Spread Angle of the Sound Field of Ultrasonic Creeping Wave Probe",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2011/4296e582/12OmNqIQSeQ",
"parentPublication": {
"id": "proceedings/icmtma/2011/4296/3",
"title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuc/2008/3433/0/3433a036",
"title": "Performance Evaluation of 3D Sound Field Reproduction System Using a Few Loudspeakers and Wave Field Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/isuc/2008/3433a036/12OmNwogh6H",
"parentPublication": {
"id": "proceedings/isuc/2008/3433/0",
"title": "2008 Second International Symposium on Universal Communication",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cicn/2011/4587/0/4587a177",
"title": "Analysis of Deformities in Lung Using Short Time Fourier Transform Spectrogram Analysis on Lung Sound",
"doi": null,
"abstractUrl": "/proceedings-article/cicn/2011/4587a177/12OmNyq0zEK",
"parentPublication": {
"id": "proceedings/cicn/2011/4587/0",
"title": "Computational Intelligence and Communication Networks, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiaiaai/2013/2134/0/06630328",
"title": "Control of Microorganism by Sound Wave -- Effects of Sound Wave on Enzyme Balance in Rice Koji",
"doi": null,
"abstractUrl": "/proceedings-article/iiaiaai/2013/06630328/12OmNzBOhYL",
"parentPublication": {
"id": "proceedings/iiaiaai/2013/2134/0",
"title": "2013 IIAI International Conference on Advanced Applied Informatics (IIAIAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2010/4359/0/4359a211",
"title": "Sound Wave Propagation Applied in Games",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2010/4359a211/12OmNzX6clk",
"parentPublication": {
"id": "proceedings/sbgames/2010/4359/0",
"title": "2010 Brazilian Symposium on Games and Digital Entertainment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08307458",
"title": "Diffraction Kernels for Interactive Sound Propagation in Dynamic Environments",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08307458/13rRUwh80Hk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07360212",
"title": "Tracing Analytic Ray Curves for Light and Sound Propagation in Non-Linear Media",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07360212/13rRUxYIN4c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07014276",
"title": "WAVE: Interactive Wave-based Sound Propagation for Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07014276/13rRUygT7yf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBSBk5H",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"acronym": "cadgraphics",
"groupId": "1001488",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxFaLiE",
"doi": "10.1109/CAD/Graphics.2011.70",
"title": "SIMD Friendly Ray Tracing on GPU",
"normalizedTitle": "SIMD Friendly Ray Tracing on GPU",
"abstract": "In this paper, we present a novel BVH tracing method on GPU, which can achieve better SIMD utilization than traditional method. In the traditional way, thread usually sticks to a ray until the closest hit is found. When the threads of the same warp follow very divergent ray paths, SIMD utilization drops significantly. The idea of our method is to redefine the way of work distribution by binding the ray and the data to be tested together, in order to spread the computation of the single ray to multi threads. We also separate the tracing process into three steps to collect the work units of the same type and process them in a stream-like manner. The first step is ray traversal whose task is to do ray-box testing for the ray-node pairs. Its output is an stack of ray-triangle pairs, which is then fed to intersecting step to form an stack of ray-hit pairs. The last step is to use ray-hit pairs to update the closest hits for each ray of the same warp. The experiment shows our method can efficiently improve the SIMD utilization and result in less tracing time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present a novel BVH tracing method on GPU, which can achieve better SIMD utilization than traditional method. In the traditional way, thread usually sticks to a ray until the closest hit is found. When the threads of the same warp follow very divergent ray paths, SIMD utilization drops significantly. The idea of our method is to redefine the way of work distribution by binding the ray and the data to be tested together, in order to spread the computation of the single ray to multi threads. We also separate the tracing process into three steps to collect the work units of the same type and process them in a stream-like manner. The first step is ray traversal whose task is to do ray-box testing for the ray-node pairs. Its output is an stack of ray-triangle pairs, which is then fed to intersecting step to form an stack of ray-hit pairs. The last step is to use ray-hit pairs to update the closest hits for each ray of the same warp. The experiment shows our method can efficiently improve the SIMD utilization and result in less tracing time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present a novel BVH tracing method on GPU, which can achieve better SIMD utilization than traditional method. In the traditional way, thread usually sticks to a ray until the closest hit is found. When the threads of the same warp follow very divergent ray paths, SIMD utilization drops significantly. The idea of our method is to redefine the way of work distribution by binding the ray and the data to be tested together, in order to spread the computation of the single ray to multi threads. We also separate the tracing process into three steps to collect the work units of the same type and process them in a stream-like manner. The first step is ray traversal whose task is to do ray-box testing for the ray-node pairs. Its output is an stack of ray-triangle pairs, which is then fed to intersecting step to form an stack of ray-hit pairs. The last step is to use ray-hit pairs to update the closest hits for each ray of the same warp. The experiment shows our method can efficiently improve the SIMD utilization and result in less tracing time.",
"fno": "4497a087",
"keywords": [
"GPU",
"Ray Tracing",
"Rendering",
"BVH"
],
"authors": [
{
"affiliation": null,
"fullName": "Peng Zhou",
"givenName": "Peng",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiangxu Meng",
"givenName": "Xiangxu",
"surname": "Meng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cadgraphics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-09-01T00:00:00",
"pubType": "proceedings",
"pages": "87-92",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4497-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4497a079",
"articleId": "12OmNyPQ4MH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4497a093",
"articleId": "12OmNzdoN7B",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/rt/2007/1629/0/04342586",
"title": "Ray-Strips: A Compact Mesh Representation for Interactive Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2007/04342586/12OmNASILFQ",
"parentPublication": {
"id": "proceedings/rt/2007/1629/0",
"title": "IEEE/ EG Symposium on Interactive Ray Tracing 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2008/2741/0/04634635",
"title": "Interactive SIMD ray tracing for large deformable tetrahedral meshes",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2008/04634635/12OmNBV9IdJ",
"parentPublication": {
"id": "proceedings/rt/2008/2741/0",
"title": "Symposium on Interactive Ray Tracing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2011/4648/0/4648a011",
"title": "GPU-Based Data Structure for a Parallel Ray Tracing Illumination Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2011/4648a011/12OmNvwC5ve",
"parentPublication": {
"id": "proceedings/sbgames/2011/4648/0",
"title": "2011 Brazilian Symposium on Games and Digital Entertainment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sccc/2010/4400/0/4400a011",
"title": "Improving the Performance of a Ray Tracing Algorithm Using a GPU",
"doi": null,
"abstractUrl": "/proceedings-article/sccc/2010/4400a011/12OmNx0RJ0z",
"parentPublication": {
"id": "proceedings/sccc/2010/4400/0",
"title": "2010 XXIX International Conference of the Chilean Computer Science Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870027",
"title": "A Hardware Acceleration Method for Volumetric Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870027/12OmNxHJ9p1",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2007/1629/0/04342585",
"title": "Interactive Ray Tracing of Arbitrary Implicits with SIMD Interval Arithmetic",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2007/04342585/12OmNyq0zMM",
"parentPublication": {
"id": "proceedings/rt/2007/1629/0",
"title": "IEEE/ EG Symposium on Interactive Ray Tracing 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2008/2741/0/04634640",
"title": "Augenblick: A user-friendly and extensible realtime ray tracing architecture",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2008/04634640/12OmNyvY9rf",
"parentPublication": {
"id": "proceedings/rt/2008/2741/0",
"title": "Symposium on Interactive Ray Tracing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/02/ttg2010020261",
"title": "Real-Time Ray Tracing of Implicit Surfaces on the GPU",
"doi": null,
"abstractUrl": "/journal/tg/2010/02/ttg2010020261/13rRUwI5TQT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/09/06081859",
"title": "Combining Single and Packet-Ray Tracing for Arbitrary Ray Distributions on the Intel MIC Architecture",
"doi": null,
"abstractUrl": "/journal/tg/2012/09/06081859/13rRUwInvJe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/micro/2022/6272/0/627200a263",
"title": "Vulkan-Sim: A GPU Architecture Simulator for Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/micro/2022/627200a263/1HMSwhI3lO8",
"parentPublication": {
"id": "proceedings/micro/2022/6272/0",
"title": "2022 55th IEEE/ACM International Symposium on Microarchitecture (MICRO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNz2TCud",
"title": "Multimedia Computing and Systems, International Conference on",
"acronym": "icmcs",
"groupId": "1000479",
"volume": "2",
"displayVolume": "2",
"year": "1999",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxecS0l",
"doi": "10.1109/MMCS.1999.778592",
"title": "A Scalable System for 3D Audio Ray Tracing",
"normalizedTitle": "A Scalable System for 3D Audio Ray Tracing",
"abstract": "Though several approaches in sound processing are denoted as 3D audio very few of them generate high quality 3D audio information which allows listeners to exactly locate sound sources in three dimensional space. We present an approach to enhance sound by high quality 3D audio information through acoustic ray tracing where 3D audio is offline processed with digital filters of a head-related transfer function. The basic approach computes a fixed sound source for fixed listener. This approach is extended to generate 3D audio for moving listeners in interactive environments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Though several approaches in sound processing are denoted as 3D audio very few of them generate high quality 3D audio information which allows listeners to exactly locate sound sources in three dimensional space. We present an approach to enhance sound by high quality 3D audio information through acoustic ray tracing where 3D audio is offline processed with digital filters of a head-related transfer function. The basic approach computes a fixed sound source for fixed listener. This approach is extended to generate 3D audio for moving listeners in interactive environments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Though several approaches in sound processing are denoted as 3D audio very few of them generate high quality 3D audio information which allows listeners to exactly locate sound sources in three dimensional space. We present an approach to enhance sound by high quality 3D audio information through acoustic ray tracing where 3D audio is offline processed with digital filters of a head-related transfer function. The basic approach computes a fixed sound source for fixed listener. This approach is extended to generate 3D audio for moving listeners in interactive environments.",
"fno": "02530819",
"keywords": [
"3 D Audio",
"Ray Tracing",
"Virtual Reality"
],
"authors": [
{
"affiliation": "C-LAB",
"fullName": "Wolfgang Mueller",
"givenName": "Wolfgang",
"surname": "Mueller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "C-LAB",
"fullName": "Frank Ullmann",
"givenName": "Frank",
"surname": "Ullmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmcs",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1999-06-01T00:00:00",
"pubType": "proceedings",
"pages": "819",
"year": "1999",
"issn": "1530-2032",
"isbn": "0-7695-0253-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "02530814",
"articleId": "12OmNwNOaOG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "02530824",
"articleId": "12OmNBtCCH5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzzxuy6",
"title": "Computer Graphics International 2005",
"acronym": "cgi",
"groupId": "1000132",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyjccyJ",
"doi": "10.1109/CGI.2005.1500343",
"title": "Ray tracing on the desktop: when and how?",
"normalizedTitle": "Ray tracing on the desktop: when and how?",
"abstract": "Summary form only given. Presents an address on ray tracing via the desktop.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Summary form only given. Presents an address on ray tracing via the desktop.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Summary form only given. Presents an address on ray tracing via the desktop.",
"fno": "01500343",
"keywords": [
"Ray Tracing"
],
"authors": [],
"idPrefix": "cgi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-07-01T00:00:00",
"pubType": "proceedings",
"pages": "xi",
"year": "2005",
"issn": null,
"isbn": "0-7803-9330-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "01500339",
"articleId": "12OmNzZ5oiz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/rt/2008/2741/0/04634641",
"title": "A straightforward CUDA implementation for interactive ray-tracing",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2008/04634641/12OmNAY79ml",
"parentPublication": {
"id": "proceedings/rt/2008/2741/0",
"title": "Symposium on Interactive Ray Tracing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2008/2741/0/04634613",
"title": "Ray-specialized acceleration structures for ray tracing",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2008/04634613/12OmNBt3qlA",
"parentPublication": {
"id": "proceedings/rt/2008/2741/0",
"title": "Symposium on Interactive Ray Tracing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2011/4648/0/4648a011",
"title": "GPU-Based Data Structure for a Parallel Ray Tracing Illumination Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2011/4648a011/12OmNvwC5ve",
"parentPublication": {
"id": "proceedings/sbgames/2011/4648/0",
"title": "2011 Brazilian Symposium on Games and Digital Entertainment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sccc/2010/4400/0/4400a011",
"title": "Improving the Performance of a Ray Tracing Algorithm Using a GPU",
"doi": null,
"abstractUrl": "/proceedings-article/sccc/2010/4400a011/12OmNx0RJ0z",
"parentPublication": {
"id": "proceedings/sccc/2010/4400/0",
"title": "2010 XXIX International Conference of the Chilean Computer Science Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a087",
"title": "SIMD Friendly Ray Tracing on GPU",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a087/12OmNxFaLiE",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870027",
"title": "A Hardware Acceleration Method for Volumetric Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870027/12OmNxHJ9p1",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/05/v0562",
"title": "Faster Isosurface Ray Tracing Using Implicit KD-Trees",
"doi": null,
"abstractUrl": "/journal/tg/2005/05/v0562/13rRUwkfAZ5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1986/02/mcg1986020041",
"title": "Ray Tracing Free-Form B-Spline Surfaces",
"doi": null,
"abstractUrl": "/magazine/cg/1986/02/mcg1986020041/13rRUxNmPKT",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2001/02/mcg2001020022",
"title": "A Benchmark for Animated Ray Tracing",
"doi": null,
"abstractUrl": "/magazine/cg/2001/02/mcg2001020022/13rRUyp7u1i",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/micro/2022/6272/0/627200a263",
"title": "Vulkan-Sim: A GPU Architecture Simulator for Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/micro/2022/627200a263/1HMSwhI3lO8",
"parentPublication": {
"id": "proceedings/micro/2022/6272/0",
"title": "2022 55th IEEE/ACM International Symposium on Microarchitecture (MICRO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwbcJ3B",
"title": "Image Processing, International Conference on",
"acronym": "icip",
"groupId": "1000349",
"volume": "3",
"displayVolume": "3",
"year": "1995",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAObbHD",
"doi": "10.1109/ICIP.1995.538547",
"title": "Image quality and readability",
"normalizedTitle": "Image quality and readability",
"abstract": "Determining the readability of documents is an important task. Human readability pertains to the scenario when a document image is ultimately presented to a human to read. Machine readability pertains to the scenario when the document is subjected to an OCR process. In either case, poor image quality might render a document unreadable. A document image which is human readable is often not machine readable. It is often advisable to filter out documents of poor image quality before sending them to either machine or human for reading. This paper is about the design of such a filter. We describe various factors which affect document image quality and the accuracy of predicting the extent of human and machine readability possible using metrics based on document image quality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Determining the readability of documents is an important task. Human readability pertains to the scenario when a document image is ultimately presented to a human to read. Machine readability pertains to the scenario when the document is subjected to an OCR process. In either case, poor image quality might render a document unreadable. A document image which is human readable is often not machine readable. It is often advisable to filter out documents of poor image quality before sending them to either machine or human for reading. This paper is about the design of such a filter. We describe various factors which affect document image quality and the accuracy of predicting the extent of human and machine readability possible using metrics based on document image quality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Determining the readability of documents is an important task. Human readability pertains to the scenario when a document image is ultimately presented to a human to read. Machine readability pertains to the scenario when the document is subjected to an OCR process. In either case, poor image quality might render a document unreadable. A document image which is human readable is often not machine readable. It is often advisable to filter out documents of poor image quality before sending them to either machine or human for reading. This paper is about the design of such a filter. We describe various factors which affect document image quality and the accuracy of predicting the extent of human and machine readability possible using metrics based on document image quality.",
"fno": "73103324",
"keywords": [
"Document Image Processing Image Processing Optical Character Recognition Image Readability Documents Readability Human Readability Document Image Machine Readability OCR Process Document Image Quality"
],
"authors": [
{
"affiliation": "Center of Excellence for Document Anal. & Recognition, State Univ. of New York, Buffalo, NY, USA",
"fullName": "V. Govindaraju",
"givenName": "V.",
"surname": "Govindaraju",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center of Excellence for Document Anal. & Recognition, State Univ. of New York, Buffalo, NY, USA",
"fullName": "S.N. Srihari",
"givenName": "S.N.",
"surname": "Srihari",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icip",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1995-10-01T00:00:00",
"pubType": "proceedings",
"pages": "3324",
"year": "1995",
"issn": null,
"isbn": "0-8186-7310-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "73103320",
"articleId": "12OmNviHKjK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "73103328",
"articleId": "12OmNCbCrNx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCbU3aM",
"title": "Proceedings Sixth International Conference on Information Visualisation",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCbU343",
"doi": "10.1109/IV.2002.1028790",
"title": "Distortion for Readability of Contextualized Text Explanations for Visualizations",
"normalizedTitle": "Distortion for Readability of Contextualized Text Explanations for Visualizations",
"abstract": "Dual-Use of Image Space (DUIS) is a new technique for presenting text explanations for images within image space. This, however, presents the problem of readability of the text since readers are used to reading from rectangular windows, on the other hand, a graphical object will typically have a silhouette of an irregular shape. To improve the readability we have developed a technique called rectangularization, i.e. on users? request the selected object morphs into a rectangular window. In order to preserve the context for the rectangularized object, as the selected object morphs into a rectangle, the other objects in the scene also change shape to create room for the newly created rectangle. On top of discussing the rectangularization concept, this paper discusses the algorithm used to displace the other objects in the scene. We also show the extension of the algorithms to allow for multiple rectangularization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dual-Use of Image Space (DUIS) is a new technique for presenting text explanations for images within image space. This, however, presents the problem of readability of the text since readers are used to reading from rectangular windows, on the other hand, a graphical object will typically have a silhouette of an irregular shape. To improve the readability we have developed a technique called rectangularization, i.e. on users? request the selected object morphs into a rectangular window. In order to preserve the context for the rectangularized object, as the selected object morphs into a rectangle, the other objects in the scene also change shape to create room for the newly created rectangle. On top of discussing the rectangularization concept, this paper discusses the algorithm used to displace the other objects in the scene. We also show the extension of the algorithms to allow for multiple rectangularization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dual-Use of Image Space (DUIS) is a new technique for presenting text explanations for images within image space. This, however, presents the problem of readability of the text since readers are used to reading from rectangular windows, on the other hand, a graphical object will typically have a silhouette of an irregular shape. To improve the readability we have developed a technique called rectangularization, i.e. on users? request the selected object morphs into a rectangular window. In order to preserve the context for the rectangularized object, as the selected object morphs into a rectangle, the other objects in the scene also change shape to create room for the newly created rectangle. On top of discussing the rectangularization concept, this paper discusses the algorithm used to displace the other objects in the scene. We also show the extension of the algorithms to allow for multiple rectangularization.",
"fno": "16560289",
"keywords": [
"Dual Use Of Image Space",
"Text Explanation",
"Distortion",
"Rectangularization"
],
"authors": [
{
"affiliation": "University of Magdeburg",
"fullName": "Wallace Chigona",
"givenName": "Wallace",
"surname": "Chigona",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Magdeburg",
"fullName": "Thomas Strothotte",
"givenName": "Thomas",
"surname": "Strothotte",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-07-01T00:00:00",
"pubType": "proceedings",
"pages": "289",
"year": "2002",
"issn": "1093-9547",
"isbn": "0-7695-1656-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "16560281",
"articleId": "12OmNyQGSnb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "16560295",
"articleId": "12OmNqG0SPi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2017/4822/0/07926732",
"title": "Text-Edge-Box: An Object Proposal Approach for Scene Texts Localization",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926732/12OmNBtl1BL",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851e387",
"title": "Recovering Transparent Shape from Time-of-Flight Distortion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851e387/12OmNC0y5Eu",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2010/9992/0/05711727",
"title": "Optical watermarking technique robust to geometrical distortion in image",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2010/05711727/12OmNy1SFM7",
"parentPublication": {
"id": "proceedings/isspit/2010/9992/0",
"title": "2010 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/409P3B42",
"title": "Visual stem mapping and Geometric Tense coding for Augmented Visual Vocabulary",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/409P3B42/12OmNy3148V",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/05/ttg2012050662",
"title": "Visual Readability Analysis: How to Make Your Writings Easier to Read",
"doi": null,
"abstractUrl": "/journal/tg/2012/05/ttg2012050662/13rRUwhpBE6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545037",
"title": "Deep Learning-based Face Recognition and the Robustness to Perspective Distortion",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545037/17D45WHONrq",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f886",
"title": "Radial Distortion Invariant Factorization for Structure from Motion",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f886/1BmFAFvMMec",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4549",
"title": "Towards Complete Scene and Regular Shape for Distortion Rectification by Curve-Aware Extrapolation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4549/1BmHYzAYwFi",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a573",
"title": "Drawing Network Visualizations on a Continuous, Spherical Surface",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a573/1rSRcY0ThyU",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412659",
"title": "Distortion-Adaptive Grape Bunch Counting for Omnidirectional Images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412659/1tmix6Eg1z2",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBKmXfs",
"title": "2018 13th IAPR International Workshop on Document Analysis Systems (DAS)",
"acronym": "das",
"groupId": "1002506",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwFzNZS",
"doi": "10.1109/DAS.2018.42",
"title": "\"Quality\" vs. \"Readability\" in Document Images: Statistical Analysis of Human Perception",
"normalizedTitle": "\"Quality\" vs. \"Readability\" in Document Images: Statistical Analysis of Human Perception",
"abstract": "Based on the hypothesis that a good / poor quality document image is most probably a readable / unreadable document, document image quality and readability have interchangeably been used in the literature. These two terms, however, have different meanings implying two different perspectives of looking at document images by human being. In document images, the level of quality and the degree of readability may have a relation / correlation considering human perception. However, to the best of our knowledge there is no specific study to characterise this relation and also validate the abovementioned hypothesis. In this work, at first, we created a dataset composed of mostly camera-based document images with various distortion levels. Each document image has then been assessed with regard to two different measures, the level of quality and the degree of readability, by different individuals. A detailed Normalised Cross Correlation analysis along with different statistical analysis based on Shapiro-Wilks and Wilcoxon tests has further been provided to demonstrate how document image quality and readability are linked. Our findings indicate that the quality and readability were somewhat different in terms of the population distributions. However, the correlation between quality and readability was 0.99, which implies document quality and readability are highly correlated based on human perception.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Based on the hypothesis that a good / poor quality document image is most probably a readable / unreadable document, document image quality and readability have interchangeably been used in the literature. These two terms, however, have different meanings implying two different perspectives of looking at document images by human being. In document images, the level of quality and the degree of readability may have a relation / correlation considering human perception. However, to the best of our knowledge there is no specific study to characterise this relation and also validate the abovementioned hypothesis. In this work, at first, we created a dataset composed of mostly camera-based document images with various distortion levels. Each document image has then been assessed with regard to two different measures, the level of quality and the degree of readability, by different individuals. A detailed Normalised Cross Correlation analysis along with different statistical analysis based on Shapiro-Wilks and Wilcoxon tests has further been provided to demonstrate how document image quality and readability are linked. Our findings indicate that the quality and readability were somewhat different in terms of the population distributions. However, the correlation between quality and readability was 0.99, which implies document quality and readability are highly correlated based on human perception.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Based on the hypothesis that a good / poor quality document image is most probably a readable / unreadable document, document image quality and readability have interchangeably been used in the literature. These two terms, however, have different meanings implying two different perspectives of looking at document images by human being. In document images, the level of quality and the degree of readability may have a relation / correlation considering human perception. However, to the best of our knowledge there is no specific study to characterise this relation and also validate the abovementioned hypothesis. In this work, at first, we created a dataset composed of mostly camera-based document images with various distortion levels. Each document image has then been assessed with regard to two different measures, the level of quality and the degree of readability, by different individuals. A detailed Normalised Cross Correlation analysis along with different statistical analysis based on Shapiro-Wilks and Wilcoxon tests has further been provided to demonstrate how document image quality and readability are linked. Our findings indicate that the quality and readability were somewhat different in terms of the population distributions. However, the correlation between quality and readability was 0.99, which implies document quality and readability are highly correlated based on human perception.",
"fno": "3346a363",
"keywords": [
"Cameras",
"Document Image Processing",
"Statistical Analysis",
"Human Perception",
"Document Image Quality",
"Camera Based Document Images",
"Statistical Analysis",
"Good Quality Document Image",
"Poor Quality Document Image",
"Readable Document",
"Unreadable Document",
"Distortion Levels",
"Readability Degree",
"Quality Level",
"Normalised Cross Correlation Analysis",
"Population Distributions",
"Image Quality",
"Optical Character Recognition Software",
"Correlation",
"Text Analysis",
"Sociology",
"Observers",
"Document Image Analysis",
"Quality And Readability",
"Human Visual System",
"Shapiro Wilks Test",
"Wilcoxon Test"
],
"authors": [
{
"affiliation": null,
"fullName": "Alireza Alaei",
"givenName": "Alireza",
"surname": "Alaei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Romain Raveaux",
"givenName": "Romain",
"surname": "Raveaux",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Donatello Conte",
"givenName": "Donatello",
"surname": "Conte",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bela Stantic",
"givenName": "Bela",
"surname": "Stantic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "das",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-04-01T00:00:00",
"pubType": "proceedings",
"pages": "363-368",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3346-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3346a357",
"articleId": "12OmNBgQFNQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3346a369",
"articleId": "12OmNyjccyu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1995/7310/3/73103324",
"title": "Image quality and readability",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73103324/12OmNAObbHD",
"parentPublication": {
"id": "proceedings/icip/1995/7310/3",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2007/2822/2/28220569",
"title": "Assessing and Improving the Quality of Document Images Acquired with Portable Digital Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2007/28220569/12OmNAndieR",
"parentPublication": {
"id": "proceedings/icdar/2007/2822/2",
"title": "Ninth International Conference on Document Analysis and Recognition (ICDAR 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2915/0/00201768",
"title": "Image and document processing techniques for the RightPages electronic library system",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00201768/12OmNBDgZ1a",
"parentPublication": {
"id": "proceedings/icpr/1992/2915/0",
"title": "11th IAPR International Conference on Pattern Recognition. Vol.II. Conference B: Pattern Recognition Methodology and Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2013/5062/0/06657132",
"title": "Cross Domain Assessment of Document to HTML Conversion Tools to Quantify Text and Structural Loss during Document Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2013/06657132/12OmNBqMDiz",
"parentPublication": {
"id": "proceedings/eisic/2013/5062/0",
"title": "2013 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2017/3586/1/3586b101",
"title": "Beyond OCRs for Document Blur Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2017/3586b101/12OmNCulYm3",
"parentPublication": {
"id": "proceedings/icdar/2017/3586/1",
"title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmpsac/1979/9999/0/00762579",
"title": "Program factors as predictors of program readability",
"doi": null,
"abstractUrl": "/proceedings-article/cmpsac/1979/00762579/12OmNs0TKJC",
"parentPublication": {
"id": "proceedings/cmpsac/1979/9999/0",
"title": "COMPSAC 79 - Proceedings. Computer Software and The IEEE Computer Society's Third International Applications Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/das/2018/3346/0/3346a263",
"title": "Document Image Binarization Using Recurrent Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/das/2018/3346a263/12OmNxwncG4",
"parentPublication": {
"id": "proceedings/das/2018/3346/0",
"title": "2018 13th IAPR International Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/das/2016/1792/0/1792a227",
"title": "Document Image Quality Assessment Using Discriminative Sparse Representation",
"doi": null,
"abstractUrl": "/proceedings-article/das/2016/1792a227/12OmNzdoMSP",
"parentPublication": {
"id": "proceedings/das/2016/1792/0",
"title": "2016 12th IAPR Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/05/ttg2012050662",
"title": "Visual Readability Analysis: How to Make Your Writings Easier to Read",
"doi": null,
"abstractUrl": "/journal/tg/2012/05/ttg2012050662/13rRUwhpBE6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2019/3014/0/301400b244",
"title": "A New Document Image Quality Assessment Method Based on Hast Derivations",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2019/301400b244/1h81psLpCgw",
"parentPublication": {
"id": "proceedings/icdar/2019/3014/0",
"title": "2019 International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBzRNrw",
"title": "2013 46th Hawaii International Conference on System Sciences",
"acronym": "hicss",
"groupId": "1000730",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzUPpk0",
"doi": "10.1109/HICSS.2013.150",
"title": "Creating Consumer Friendly Health Content: Implementing and Testing a Readability Diagnosis and Enhancement Tool",
"normalizedTitle": "Creating Consumer Friendly Health Content: Implementing and Testing a Readability Diagnosis and Enhancement Tool",
"abstract": "In the era of patient centered care, creating consumer friendly health content is an important task. Manual content development is labor intensive and could benefit from a readability assessment and enhancement tool. Building on our prior work, we developed and evaluated such a tool called ReDE. In testing, a clinician was asked to use the tool to simplify ten full-length medical documents with 9573 words. In order to assess inter-rater agreement, a second clinician simplified four of the ten documents. The results show that 77% of the clinicians' revisions were for concepts identified as difficult by ReDE, which validates the readability assessment made by the tool. However, a much smaller percentage (33%) of the ReDE suggested replacements were accepted by either of the clinicians which indicates further improvement is warranted.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the era of patient centered care, creating consumer friendly health content is an important task. Manual content development is labor intensive and could benefit from a readability assessment and enhancement tool. Building on our prior work, we developed and evaluated such a tool called ReDE. In testing, a clinician was asked to use the tool to simplify ten full-length medical documents with 9573 words. In order to assess inter-rater agreement, a second clinician simplified four of the ten documents. The results show that 77% of the clinicians' revisions were for concepts identified as difficult by ReDE, which validates the readability assessment made by the tool. However, a much smaller percentage (33%) of the ReDE suggested replacements were accepted by either of the clinicians which indicates further improvement is warranted.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the era of patient centered care, creating consumer friendly health content is an important task. Manual content development is labor intensive and could benefit from a readability assessment and enhancement tool. Building on our prior work, we developed and evaluated such a tool called ReDE. In testing, a clinician was asked to use the tool to simplify ten full-length medical documents with 9573 words. In order to assess inter-rater agreement, a second clinician simplified four of the ten documents. The results show that 77% of the clinicians' revisions were for concepts identified as difficult by ReDE, which validates the readability assessment made by the tool. However, a much smaller percentage (33%) of the ReDE suggested replacements were accepted by either of the clinicians which indicates further improvement is warranted.",
"fno": "4892c445",
"keywords": [
"Unified Modeling Language",
"Medical Diagnostic Imaging",
"Vocabulary",
"Educational Institutions",
"Graphical User Interfaces",
"Databases",
"Libraries",
"Consumer Health Vocabulary",
"Natural Language Processing",
"Readability",
"Readability Score",
"Health Readability",
"Consumer Health"
],
"authors": [
{
"affiliation": null,
"fullName": "Joshua Proulx",
"givenName": "Joshua",
"surname": "Proulx",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sasikiran Kandula",
"givenName": "Sasikiran",
"surname": "Kandula",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Brent Hill",
"givenName": "Brent",
"surname": "Hill",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qing Zeng-Treitler",
"givenName": "Qing",
"surname": "Zeng-Treitler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2013-01-01T00:00:00",
"pubType": "proceedings",
"pages": "2445-2453",
"year": "2013",
"issn": "1530-1605",
"isbn": "978-1-4673-5933-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4892c435",
"articleId": "12OmNzYwc12",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4892c454",
"articleId": "12OmNx9FhMU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/alpit/2008/3273/0/3273a156",
"title": "A New Approach to Readability Study Based on Information Computing",
"doi": null,
"abstractUrl": "/proceedings-article/alpit/2008/3273a156/12OmNAkWvyO",
"parentPublication": {
"id": "proceedings/alpit/2008/3273/0",
"title": "Advanced Language Processing and Web Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2017/3050/0/08217846",
"title": "Exploratory textual analysis of consumer health languages for people who are D/deaf and hard of hearing",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217846/12OmNyv7m2k",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/2000/0840/0/08400069",
"title": "Improving Readability in a Visual Language with a VLSI-Like Approach",
"doi": null,
"abstractUrl": "/proceedings-article/vl/2000/08400069/12OmNzcPA2X",
"parentPublication": {
"id": "proceedings/vl/2000/0840/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/1995/7045/0/70450037",
"title": "Improving readability of iconic programs with multiple view object representation",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1995/70450037/12OmNzsJ7wm",
"parentPublication": {
"id": "proceedings/vl/1995/7045/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/05/ttg2012050662",
"title": "Visual Readability Analysis: How to Make Your Writings Easier to Read",
"doi": null,
"abstractUrl": "/journal/tg/2012/05/ttg2012050662/13rRUwhpBE6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2016/03/mit2016030045",
"title": "Moving Beyond Readability Metrics for Health-Related Text Simplification",
"doi": null,
"abstractUrl": "/magazine/it/2016/03/mit2016030045/13rRUx0gerG",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a001",
"title": "Advances in Readability Research: A New Readability Web App for English",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a001/1FUUhMR5RUQ",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnlp/2022/9544/0/954400a439",
"title": "Research on Readability Grade Formula Based on HSK Compositions",
"doi": null,
"abstractUrl": "/proceedings-article/icnlp/2022/954400a439/1GNtoF1PJmw",
"parentPublication": {
"id": "proceedings/icnlp/2022/9544/0",
"title": "2022 4th International Conference on Natural Language Processing (ICNLP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2021/6834/0/683400a025",
"title": "Coherence and Cohesion for the Assessment of Text Readability by Coh-Metrix",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2021/683400a025/1vmLKZcRt8A",
"parentPublication": {
"id": "proceedings/icekim/2021/6834/0",
"title": "2021 2nd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieit/2021/2563/0/256300a569",
"title": "A Corpus-Based Comparative Analysis of the Readability of Chinese Textbooks Used by the Preliminary Schools in Mainland China and Taiwan",
"doi": null,
"abstractUrl": "/proceedings-article/ieit/2021/256300a569/1wHKu6RyxDa",
"parentPublication": {
"id": "proceedings/ieit/2021/2563/0",
"title": "2021 International Conference on Internet, Education and Information Technology (IEIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyqRne6",
"title": "Visual Languages, IEEE Symposium on",
"acronym": "vl",
"groupId": "1000793",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzcPA2X",
"doi": "10.1109/VL.2000.874356",
"title": "Improving Readability in a Visual Language with a VLSI-Like Approach",
"normalizedTitle": "Improving Readability in a Visual Language with a VLSI-Like Approach",
"abstract": "One major challenge with graph-based visual languages is managing the complexity and maintaining a good readability as the density of edges in the graph increases. To improve the graph readability we propose a solution that manages the visual complexity of graph-based diagrams with many hundreds, and even thousands of nodes. It consists in applying heuristics to choose the best path for an edge and attributing to it a color defined automatically according to its origin and destination nodes. A grid system where certain areas are reserved for the display of nodes and others for the edges is used for the layout of the nodes and edges.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One major challenge with graph-based visual languages is managing the complexity and maintaining a good readability as the density of edges in the graph increases. To improve the graph readability we propose a solution that manages the visual complexity of graph-based diagrams with many hundreds, and even thousands of nodes. It consists in applying heuristics to choose the best path for an edge and attributing to it a color defined automatically according to its origin and destination nodes. A grid system where certain areas are reserved for the display of nodes and others for the edges is used for the layout of the nodes and edges.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One major challenge with graph-based visual languages is managing the complexity and maintaining a good readability as the density of edges in the graph increases. To improve the graph readability we propose a solution that manages the visual complexity of graph-based diagrams with many hundreds, and even thousands of nodes. It consists in applying heuristics to choose the best path for an edge and attributing to it a color defined automatically according to its origin and destination nodes. A grid system where certain areas are reserved for the display of nodes and others for the edges is used for the layout of the nodes and edges.",
"fno": "08400069",
"keywords": [
"Graph",
"Incremental Layout",
"Orthogonal Drawing",
"Readability",
"Visual Language"
],
"authors": [
{
"affiliation": "University of Geneva",
"fullName": "Bertrand Ibrahim",
"givenName": "Bertrand",
"surname": "Ibrahim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Geneva",
"fullName": "Honitriniela Randriamparany",
"givenName": "Honitriniela",
"surname": "Randriamparany",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Geneva",
"fullName": "Hidenori Yoshizumi",
"givenName": "Hidenori",
"surname": "Yoshizumi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vl",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-09-01T00:00:00",
"pubType": "proceedings",
"pages": "69",
"year": "2000",
"issn": "1049-2615",
"isbn": "0-7695-0840-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08400067",
"articleId": "12OmNxuo0iX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08400071",
"articleId": "12OmNCfjerk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxvO07y",
"title": "Visual Languages, IEEE Symposium on",
"acronym": "vl",
"groupId": "1000793",
"volume": "0",
"displayVolume": "0",
"year": "1995",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzsJ7wm",
"doi": "10.1109/VL.1995.520783",
"title": "Improving readability of iconic programs with multiple view object representation",
"normalizedTitle": "Improving readability of iconic programs with multiple view object representation",
"abstract": "One of the most important advantages of an iconic programming language is its readability. In order to improve the readability of complicated iconic programs with many wire intersections and loops, we introduce a technique called \"multiple view object representation\". It means that one program component can be represented as a number of nodes, i.e., it provides layout flexibility. By using the flexibility, programmers can transform a complicated iconic program into a number of simple iconic programs. An iconic programming system was implemented based on the technique and evaluated through practical application construction. The evaluation illustrated that the technique greatly reduces anti-readability factors, such as loops and wire intersections, of complicated programs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One of the most important advantages of an iconic programming language is its readability. In order to improve the readability of complicated iconic programs with many wire intersections and loops, we introduce a technique called \"multiple view object representation\". It means that one program component can be represented as a number of nodes, i.e., it provides layout flexibility. By using the flexibility, programmers can transform a complicated iconic program into a number of simple iconic programs. An iconic programming system was implemented based on the technique and evaluated through practical application construction. The evaluation illustrated that the technique greatly reduces anti-readability factors, such as loops and wire intersections, of complicated programs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One of the most important advantages of an iconic programming language is its readability. In order to improve the readability of complicated iconic programs with many wire intersections and loops, we introduce a technique called \"multiple view object representation\". It means that one program component can be represented as a number of nodes, i.e., it provides layout flexibility. By using the flexibility, programmers can transform a complicated iconic program into a number of simple iconic programs. An iconic programming system was implemented based on the technique and evaluated through practical application construction. The evaluation illustrated that the technique greatly reduces anti-readability factors, such as loops and wire intersections, of complicated programs.",
"fno": "70450037",
"keywords": [
"Visual Languages Visual Programming Object Oriented Programming Graphical User Interfaces Improved Iconic Program Readability Multiple View Object Representation Iconic Programming Language Wire Intersections Loops Program Component Nodes Layout Flexibility Complicated Iconic Program Simple Iconic Programs Iconic Programming System Application Construction Anti Readability Factor Reduction"
],
"authors": [
{
"affiliation": "C&C Res. Labs., NEC Corp., Kawasaki, Japan",
"fullName": "Y. Koike",
"givenName": "Y.",
"surname": "Koike",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "C&C Res. Labs., NEC Corp., Kawasaki, Japan",
"fullName": "Y. Maeda",
"givenName": "Y.",
"surname": "Maeda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "C&C Res. Labs., NEC Corp., Kawasaki, Japan",
"fullName": "Y. Koseki",
"givenName": "Y.",
"surname": "Koseki",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vl",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1995-09-01T00:00:00",
"pubType": "proceedings",
"pages": "37",
"year": "1995",
"issn": "1049-2615",
"isbn": "0-8186-7045-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "70450029",
"articleId": "12OmNBOCWtH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "70450045",
"articleId": "12OmNyyeWx9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.