data
dict |
|---|
{
"proceeding": {
"id": "14jQfMYohco",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "14jQfNVqQfL",
"doi": "10.1109/ICME.2018.8486572",
"title": "Saliency Detection by Deep Network with Boundary Refinement and Global Context",
"normalizedTitle": "Saliency Detection by Deep Network with Boundary Refinement and Global Context",
"abstract": "A novel end-to-end fully convolutional neural network for saliency detection is proposed in this paper, aiming at refining the boundary and covering the global context (GBR-Net). Previous CNN based methods for saliency detection are universally accompanied with blurring edge and ambiguous salient object. To tackle this problem, we propose to embed the boundary enhancement block (BEB) into the network to refine edge. It keeps the details by the mutual-coupling con-volutionallayers. Besides, we employ a pooling pyramid that utilizes the multi-level feature informations to search global context, and it also contributes as an auxiliary supervision. The final saliency map is obtained by fusing the edge refinement with global context extraction. Experiments on four benchmark datasets prove that the proposed saliency detection model gains an edge over the state-of-the-art approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A novel end-to-end fully convolutional neural network for saliency detection is proposed in this paper, aiming at refining the boundary and covering the global context (GBR-Net). Previous CNN based methods for saliency detection are universally accompanied with blurring edge and ambiguous salient object. To tackle this problem, we propose to embed the boundary enhancement block (BEB) into the network to refine edge. It keeps the details by the mutual-coupling con-volutionallayers. Besides, we employ a pooling pyramid that utilizes the multi-level feature informations to search global context, and it also contributes as an auxiliary supervision. The final saliency map is obtained by fusing the edge refinement with global context extraction. Experiments on four benchmark datasets prove that the proposed saliency detection model gains an edge over the state-of-the-art approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A novel end-to-end fully convolutional neural network for saliency detection is proposed in this paper, aiming at refining the boundary and covering the global context (GBR-Net). Previous CNN based methods for saliency detection are universally accompanied with blurring edge and ambiguous salient object. To tackle this problem, we propose to embed the boundary enhancement block (BEB) into the network to refine edge. It keeps the details by the mutual-coupling con-volutionallayers. Besides, we employ a pooling pyramid that utilizes the multi-level feature informations to search global context, and it also contributes as an auxiliary supervision. The final saliency map is obtained by fusing the edge refinement with global context extraction. Experiments on four benchmark datasets prove that the proposed saliency detection model gains an edge over the state-of-the-art approaches.",
"fno": "08486572",
"keywords": [
"Kernel",
"Saliency Detection",
"Convolution",
"Image Edge Detection",
"Benchmark Testing",
"Radio Frequency",
"Semantics",
"Saliency Detection",
"Boundary Refinement",
"Global Context",
"Pooling Pyramid"
],
"authors": [
{
"affiliation": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, China",
"fullName": "Xin Tan",
"givenName": "Xin",
"surname": "Tan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, China",
"fullName": "Hengliang Zhu",
"givenName": "Hengliang",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, China",
"fullName": "Zhiwen Shao",
"givenName": "Zhiwen",
"surname": "Shao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, China",
"fullName": "Xiaonan Hou",
"givenName": "Xiaonan",
"surname": "Hou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, China",
"fullName": "Yangyang Hao",
"givenName": "Yangyang",
"surname": "Hao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, China",
"fullName": "Lizhuang Ma",
"givenName": "Lizhuang",
"surname": "Ma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-1737-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08486571",
"articleId": "14jQfPGqOcz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08486519",
"articleId": "14jQfNIWuwX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/1992/2910/0/00201517",
"title": "Perceptual grouping using global saliency-enhancing operators",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00201517/12OmNBBzomg",
"parentPublication": {
"id": "proceedings/icpr/1992/2910/0",
"title": "1992 11th IAPR International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a309",
"title": "Edge Saliency Map Detection with Texture Suppression",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a309/12OmNqJZgCI",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995743",
"title": "Image saliency: From intrinsic to extrinsic context",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995743/12OmNrF2DMA",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/061P1B08",
"title": "Exploiting local and global patch rarities for saliency detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/061P1B08/12OmNrY3LuC",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2012/4683/0/4683a117",
"title": "Recurrent Refinement for Visual Saliency Estimation in Surveillance Scenarios",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2012/4683a117/12OmNy68ECu",
"parentPublication": {
"id": "proceedings/crv/2012/4683/0",
"title": "2012 Ninth Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2012/10/ttp2012101915",
"title": "Context-Aware Saliency Detection",
"doi": null,
"abstractUrl": "/journal/tp/2012/10/ttp2012101915/13rRUx0ger3",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08585158",
"title": "Saliency-Aware Texture Smoothing",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08585158/17D45XeKgwj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545545",
"title": "Saliency Detection using Iterative Dynamic Guided Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545545/17D45Xq6dzG",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600a743",
"title": "Salient Object Detection in Low Contrast Images via Global Convolution and Boundary Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600a743/1iTvkmagnjG",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d469",
"title": "Select, Supplement and Focus for RGB-D Saliency Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d469/1m3nFmLrJ96",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "14jQfMYohco",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "14jQfP4XL27",
"doi": "10.1109/ICME.2018.8486465",
"title": "Structure-Texture Decomposition via Joint Structure Discovery and Texture Smoothing",
"normalizedTitle": "Structure-Texture Decomposition via Joint Structure Discovery and Texture Smoothing",
"abstract": "Structure-texture decomposition from an image (a.k.a. structure-preserving image smoothing) is important for a variety of multimedia, computer vision and graphics tasks. Its performance heavily depends on the precision of indicating where are structural edges to maintain and where are textures to remove. An intuitive thought for constructing indication is to directly execute edge detection on the input image, which however would suffer from rich textures. Feeding inaccurate or erroneous indications into the smoother is at high risk of generating unsatisfactory results. It is almost sure that edge detectors can do a better job on inputs with textures removed. The above two components, say the smoother and the indicator, turn out to be in a chicken-egg situation. To address this issue, we propose a method to jointly detect structural edges and remove textures, by iteratively smoothing the input based on the edges detected from the previous smoothed result and refining the edges based on the newly processed image. Experiments on a number of challenging cases are conducted to show that the edge detection task and the smoothing task can benefit from each other, and reveal the superiority of our method over other state-of-the-art alternatives. Our code is publicly available at https://sites.google.com/view/xjguo/sdts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Structure-texture decomposition from an image (a.k.a. structure-preserving image smoothing) is important for a variety of multimedia, computer vision and graphics tasks. Its performance heavily depends on the precision of indicating where are structural edges to maintain and where are textures to remove. An intuitive thought for constructing indication is to directly execute edge detection on the input image, which however would suffer from rich textures. Feeding inaccurate or erroneous indications into the smoother is at high risk of generating unsatisfactory results. It is almost sure that edge detectors can do a better job on inputs with textures removed. The above two components, say the smoother and the indicator, turn out to be in a chicken-egg situation. To address this issue, we propose a method to jointly detect structural edges and remove textures, by iteratively smoothing the input based on the edges detected from the previous smoothed result and refining the edges based on the newly processed image. Experiments on a number of challenging cases are conducted to show that the edge detection task and the smoothing task can benefit from each other, and reveal the superiority of our method over other state-of-the-art alternatives. Our code is publicly available at https://sites.google.com/view/xjguo/sdts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Structure-texture decomposition from an image (a.k.a. structure-preserving image smoothing) is important for a variety of multimedia, computer vision and graphics tasks. Its performance heavily depends on the precision of indicating where are structural edges to maintain and where are textures to remove. An intuitive thought for constructing indication is to directly execute edge detection on the input image, which however would suffer from rich textures. Feeding inaccurate or erroneous indications into the smoother is at high risk of generating unsatisfactory results. It is almost sure that edge detectors can do a better job on inputs with textures removed. The above two components, say the smoother and the indicator, turn out to be in a chicken-egg situation. To address this issue, we propose a method to jointly detect structural edges and remove textures, by iteratively smoothing the input based on the edges detected from the previous smoothed result and refining the edges based on the newly processed image. Experiments on a number of challenging cases are conducted to show that the edge detection task and the smoothing task can benefit from each other, and reveal the superiority of our method over other state-of-the-art alternatives. Our code is publicly available at https://sites.google.com/view/xjguo/sdts.",
"fno": "08486465",
"keywords": [
"Image Edge Detection",
"Smoothing Methods",
"Detectors",
"Task Analysis",
"Convergence",
"Kernel",
"Computer Vision",
"Image Smoothing",
"Edge Detection",
"Structure Texture Decomposition"
],
"authors": [
{
"affiliation": "School of Computer Software, Tianjin University Key Research Center for Surface Monitoring and Analysis of Cultural Relics, SACH, China",
"fullName": "Xiaojie Guo",
"givenName": "Xiaojie",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Software, Tianjin University Key Research Center for Surface Monitoring and Analysis of Cultural Relics, SACH, China",
"fullName": "Siyuan Li",
"givenName": "Siyuan",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Software, Tianjin University Key Research Center for Surface Monitoring and Analysis of Cultural Relics, SACH, China",
"fullName": "Liang Li",
"givenName": "Liang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Software, Tianjin University Key Research Center for Surface Monitoring and Analysis of Cultural Relics, SACH, China",
"fullName": "Jiawan Zhang",
"givenName": "Jiawan",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-1737-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08486529",
"articleId": "14jQfSsa7Ty",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08486536",
"articleId": "14jQfNv299K",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2015/7082/0/07177403",
"title": "Edge-preserving image smoothing with local constraints on gradient and intensity",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177403/12OmNqBtiEo",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851e517",
"title": "Semantic Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851e517/12OmNqN6R7w",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2920/0/00202093",
"title": "A study on the forms of smoothing filters for step and ramp edge detection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00202093/12OmNwEJ0DT",
"parentPublication": {
"id": "proceedings/icpr/1992/2920/0",
"title": "11th IAPR International Conference on Pattern Recognition. Vol. III. Conference C: Image, Speech and Signal Analysis,",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a361",
"title": "Segment Graph Based Image Filtering: Fast Structure-Preserving Smoothing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a361/12OmNx7ouXK",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoip/2010/4252/1/4252a214",
"title": "An Improved Edge Preserving Smoothing Method (IEPS)",
"doi": null,
"abstractUrl": "/proceedings-article/icoip/2010/4252a214/12OmNxisQT2",
"parentPublication": {
"id": "proceedings/icoip/2010/4252/2",
"title": "Optoelectronics and Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1998/09/i0916",
"title": "Close-Form Solution and Parameter Selection for Convex Minimization-Based Edge-Preserving Smoothing",
"doi": null,
"abstractUrl": "/journal/tp/1998/09/i0916/13rRUyuNsy6",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486543",
"title": "Pointwise Shape-Adaptive Texture Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486543/14jQfP4XL28",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486448",
"title": "Soft Clustering Guided Image Smoothing",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486448/14jQfPTumby",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08585158",
"title": "Saliency-Aware Texture Smoothing",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08585158/17D45XeKgwj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a191",
"title": "Structure-Preserving Bilateral Texture Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a191/1ap5z7EILYY",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "14jQfMYohco",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "14jQfP4XL28",
"doi": "10.1109/ICME.2018.8486543",
"title": "Pointwise Shape-Adaptive Texture Filtering",
"normalizedTitle": "Pointwise Shape-Adaptive Texture Filtering",
"abstract": "The structure-preserving image smoothing is an essential pre-processing to high-level semantic image analysis. In this paper, we introduce a structure-preserving filtering scheme based on a novel and flexible line shift to separate prominent structures from textures. Unlike the previous works, the line shift can form arbitrarily shaped pure texture area for texture smoothing. We integrate the line shift with the prior knowledge of structures into a framework of joint bilateral filtering. This can greatly improve the efficiency of removing textures on pixels that are close to sharp corners or located in long narrow strip areas, while preserve fine structures simultaneously. We demonstrate the performance of the algorithm by testing it on many images with diverse texture patterns.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The structure-preserving image smoothing is an essential pre-processing to high-level semantic image analysis. In this paper, we introduce a structure-preserving filtering scheme based on a novel and flexible line shift to separate prominent structures from textures. Unlike the previous works, the line shift can form arbitrarily shaped pure texture area for texture smoothing. We integrate the line shift with the prior knowledge of structures into a framework of joint bilateral filtering. This can greatly improve the efficiency of removing textures on pixels that are close to sharp corners or located in long narrow strip areas, while preserve fine structures simultaneously. We demonstrate the performance of the algorithm by testing it on many images with diverse texture patterns.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The structure-preserving image smoothing is an essential pre-processing to high-level semantic image analysis. In this paper, we introduce a structure-preserving filtering scheme based on a novel and flexible line shift to separate prominent structures from textures. Unlike the previous works, the line shift can form arbitrarily shaped pure texture area for texture smoothing. We integrate the line shift with the prior knowledge of structures into a framework of joint bilateral filtering. This can greatly improve the efficiency of removing textures on pixels that are close to sharp corners or located in long narrow strip areas, while preserve fine structures simultaneously. We demonstrate the performance of the algorithm by testing it on many images with diverse texture patterns.",
"fno": "08486543",
"keywords": [
"Filtering",
"Image Edge Detection",
"Image Segmentation",
"Strips",
"Smoothing Methods",
"Image Color Analysis",
"Kernel",
"Line Shift",
"Joint Bilateral Filtering",
"Texture Filtering",
"Structure Preserving",
"Image Decomposition"
],
"authors": [
{
"affiliation": "College of Computer Science and Technology, Zhejiang University, Hangzhou, 310027",
"fullName": "Bolu Liu",
"givenName": "Bolu",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Computer Science and Technology, Zhejiang University, Hangzhou, 310027",
"fullName": "Xiqun Lu",
"givenName": "Xiqun",
"surname": "Lu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-1737-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08486462",
"articleId": "14jQfSC65rT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08486540",
"articleId": "14jQfO2OpDv",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmlc/2003/7865/4/01259906",
"title": "Wavelet transform-based texture segmentation using feature smoothing",
"doi": null,
"abstractUrl": "/proceedings-article/icmlc/2003/01259906/12OmNBTawx7",
"parentPublication": {
"id": "proceedings/icmlc/2003/7865/4",
"title": "Proceedings of the 2003 International Conference on Machine Learning and Cybernetics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851e517",
"title": "Semantic Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851e517/12OmNqN6R7w",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2012/4683/0/4683a383",
"title": "Regularized Gradient Kernel Anisotropic Diffusion for Better Image Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2012/4683a383/12OmNwCsdAi",
"parentPublication": {
"id": "proceedings/crv/2012/4683/0",
"title": "2012 Ninth Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a361",
"title": "Segment Graph Based Image Filtering: Fast Structure-Preserving Smoothing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a361/12OmNx7ouXK",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460152",
"title": "An edge-preserving filtering framework for visibility restoration",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460152/12OmNzxgHs4",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/06/ttp2013061397",
"title": "Guided Image Filtering",
"doi": null,
"abstractUrl": "/journal/tp/2013/06/ttp2013061397/13rRUxYrbNs",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486465",
"title": "Structure-Texture Decomposition via Joint Structure Discovery and Texture Smoothing",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486465/14jQfP4XL27",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486448",
"title": "Soft Clustering Guided Image Smoothing",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486448/14jQfPTumby",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08585158",
"title": "Saliency-Aware Texture Smoothing",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08585158/17D45XeKgwj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a191",
"title": "Structure-Preserving Bilateral Texture Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a191/1ap5z7EILYY",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "14jQfMYohco",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "14jQfRlKNy6",
"doi": "10.1109/ICME.2018.8486603",
"title": "Co-Saliency Detection via Hierarchical Consistency Measure",
"normalizedTitle": "Co-Saliency Detection via Hierarchical Consistency Measure",
"abstract": "Co-saliency detection is a newly emerging research topic in multimedia and computer vision, the goal of which is to extract common salient objects from multiple images. Effectively seeking the global consistency among multiple images is critical to the performance. To achieve the goal, this paper designs a novel model with consideration of a hierarchical consistency measure. Different from most existing co-saliency methods that only exploit common features (such as color and texture), this paper further utilizes the shape of object as another cue to evaluate the consistency among common salient objects. More specifically, for each involved image, an intra-image saliency map is firstly generated via a single image saliency detection algorithm. Having the intra-image map constructed, the consistency metrics at object level and superpixel level are designed to measure the corresponding relationship among multiple images and obtain the inter saliency result by considering multiple visual attention features and multiple constrains. Finally, the intra-image and inter-image saliency maps are fused to produce the final map. Experiments on benchmark datasets are conducted to demonstrate the effectiveness of our method, and reveal its advances over other state-of-the-art alternatives.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Co-saliency detection is a newly emerging research topic in multimedia and computer vision, the goal of which is to extract common salient objects from multiple images. Effectively seeking the global consistency among multiple images is critical to the performance. To achieve the goal, this paper designs a novel model with consideration of a hierarchical consistency measure. Different from most existing co-saliency methods that only exploit common features (such as color and texture), this paper further utilizes the shape of object as another cue to evaluate the consistency among common salient objects. More specifically, for each involved image, an intra-image saliency map is firstly generated via a single image saliency detection algorithm. Having the intra-image map constructed, the consistency metrics at object level and superpixel level are designed to measure the corresponding relationship among multiple images and obtain the inter saliency result by considering multiple visual attention features and multiple constrains. Finally, the intra-image and inter-image saliency maps are fused to produce the final map. Experiments on benchmark datasets are conducted to demonstrate the effectiveness of our method, and reveal its advances over other state-of-the-art alternatives.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Co-saliency detection is a newly emerging research topic in multimedia and computer vision, the goal of which is to extract common salient objects from multiple images. Effectively seeking the global consistency among multiple images is critical to the performance. To achieve the goal, this paper designs a novel model with consideration of a hierarchical consistency measure. Different from most existing co-saliency methods that only exploit common features (such as color and texture), this paper further utilizes the shape of object as another cue to evaluate the consistency among common salient objects. More specifically, for each involved image, an intra-image saliency map is firstly generated via a single image saliency detection algorithm. Having the intra-image map constructed, the consistency metrics at object level and superpixel level are designed to measure the corresponding relationship among multiple images and obtain the inter saliency result by considering multiple visual attention features and multiple constrains. Finally, the intra-image and inter-image saliency maps are fused to produce the final map. Experiments on benchmark datasets are conducted to demonstrate the effectiveness of our method, and reveal its advances over other state-of-the-art alternatives.",
"fno": "08486603",
"keywords": [
"Shape",
"Measurement",
"Image Color Analysis",
"Saliency Detection",
"Proposals",
"Color",
"Visualization",
"Co Saliency Detection",
"Shape Attribute",
"Multi Feature Similarity",
"Hierarchical Consistency Measure"
],
"authors": [
{
"affiliation": "School of Computer Software, Tianjin University, Tianjin, China",
"fullName": "Yonghua Zhang",
"givenName": "Yonghua",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Software, Tianjin University, Tianjin, China",
"fullName": "Liang Li",
"givenName": "Liang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Electrical and Information Engineering, Tianjin University, Tianjin, China",
"fullName": "Runmin Cong",
"givenName": "Runmin",
"surname": "Cong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Software, Tianjin University, Tianjin, China",
"fullName": "Xiaojie Guo",
"givenName": "Xiaojie",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Software, Tianjin University, Tianjin, China",
"fullName": "Hui Xu",
"givenName": "Hui",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Software, Tianjin University, Tianjin, China",
"fullName": "Jiawan Zhang",
"givenName": "Jiawan",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-1737-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08486461",
"articleId": "14jQfQj2fTq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08486583",
"articleId": "14jQfPn85m9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2017/6067/0/08019413",
"title": "Segmentation guided local proposal fusion for co-saliency detection",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019413/12OmNqFJhSz",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890183",
"title": "Co-saliency detection based on region-level fusion and pixel-level refinement",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890183/12OmNrkBwFU",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07298918",
"title": "Co-saliency detection via looking deep and wide",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298918/12OmNxXl5E6",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/12/08444709",
"title": "Personalized Saliency and Its Prediction",
"doi": null,
"abstractUrl": "/journal/tp/2019/12/08444709/13rRUy3gn8N",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a066",
"title": "Saliency-Guided Image Style Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a066/1cJ0zw9Ceru",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a332",
"title": "Multiple Graph Convolutional Networks for Co-Saliency Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a332/1cdOOu3bAEE",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300f753",
"title": "Saliency-Guided Attention Network for Image-Sentence Matching",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300f753/1hVlvA8iZ9K",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047464",
"title": "Saliency Detection Based on Weighted Saliency Probability",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047464/1iC6yUqkTja",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0",
"title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800j047",
"title": "Adaptive Graph Convolutional Network With Attention Graph Clustering for Co-Saliency Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800j047/1m3obGO2qNG",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a045",
"title": "RSF: A Novel Saliency Fusion Framework for Image Saliency Detection",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a045/1p1gt8mtlNm",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1ap5wvyUHKM",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "1ap5z7EILYY",
"doi": "10.1109/ICVRV.2017.00046",
"title": "Structure-Preserving Bilateral Texture Filtering",
"normalizedTitle": "Structure-Preserving Bilateral Texture Filtering",
"abstract": "Extracting meaningful structures from images with complicated texture patterns is challenging since it is hard to separate structure from texture of similar scale or intensity contrast. In this paper, we propose a structure-preserving bilateral texture filtering algorithm to flatten texture while preserving dominant structures. We design a new scheme, dual-scale patch toggle. That is, patches of two scales are used to represent pixels, the smaller for pixels located at structure edges and the bigger for pixels in texture regions, and then DASM (Directional Anisotropic Structure Measurement) on each pixel is estimated to determine which type of patch to represent it. The algorithm is based on the joint bilateral filtering framework, so it is fast, easy to implement, yet effective for adaptive image smoothing. In particular, our approach outperforms previous methods in terms of preserving small structures. The proposed method achieves excellent results that illustrate its effectiveness and efficiency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Extracting meaningful structures from images with complicated texture patterns is challenging since it is hard to separate structure from texture of similar scale or intensity contrast. In this paper, we propose a structure-preserving bilateral texture filtering algorithm to flatten texture while preserving dominant structures. We design a new scheme, dual-scale patch toggle. That is, patches of two scales are used to represent pixels, the smaller for pixels located at structure edges and the bigger for pixels in texture regions, and then DASM (Directional Anisotropic Structure Measurement) on each pixel is estimated to determine which type of patch to represent it. The algorithm is based on the joint bilateral filtering framework, so it is fast, easy to implement, yet effective for adaptive image smoothing. In particular, our approach outperforms previous methods in terms of preserving small structures. The proposed method achieves excellent results that illustrate its effectiveness and efficiency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Extracting meaningful structures from images with complicated texture patterns is challenging since it is hard to separate structure from texture of similar scale or intensity contrast. In this paper, we propose a structure-preserving bilateral texture filtering algorithm to flatten texture while preserving dominant structures. We design a new scheme, dual-scale patch toggle. That is, patches of two scales are used to represent pixels, the smaller for pixels located at structure edges and the bigger for pixels in texture regions, and then DASM (Directional Anisotropic Structure Measurement) on each pixel is estimated to determine which type of patch to represent it. The algorithm is based on the joint bilateral filtering framework, so it is fast, easy to implement, yet effective for adaptive image smoothing. In particular, our approach outperforms previous methods in terms of preserving small structures. The proposed method achieves excellent results that illustrate its effectiveness and efficiency.",
"fno": "263600a191",
"keywords": [
"Image Filtering",
"Image Texture",
"Joint Bilateral Filtering Framework",
"Structure Preserving Bilateral Texture Filtering",
"Dual Scale Patch Toggle",
"Directional Anisotropic Structure Measurement",
"Adaptive Image Smoothing",
"Smoothing Methods",
"Image Edge Detection",
"Kernel",
"TV",
"Market Research",
"Atmospheric Measurements",
"Image Smoothing",
"Bilateral Texture Filtering",
"Structure Measurement"
],
"authors": [
{
"affiliation": null,
"fullName": "Chengfang Song",
"givenName": "Chengfang",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chunxia Xiao",
"givenName": "Chunxia",
"surname": "Xiao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "191-196",
"year": "2017",
"issn": "2375-141X",
"isbn": "978-1-5386-2636-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "263600a186",
"articleId": "1ap5xtOpRvO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "263600a197",
"articleId": "1ap5CqvIj5K",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cad-cg/2005/2473/0/24730275",
"title": "Feature-Preserving Mesh Denoising via Bilateral Normal Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/cad-cg/2005/24730275/12OmNBJNL1l",
"parentPublication": {
"id": "proceedings/cad-cg/2005/2473/0",
"title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pcspa/2010/4180/0/4180a614",
"title": "Structure Enhancing Bilateral Filtering of Images",
"doi": null,
"abstractUrl": "/proceedings-article/pcspa/2010/4180a614/12OmNBNM9ar",
"parentPublication": {
"id": "proceedings/pcspa/2010/4180/0",
"title": "Pervasive Computing, Signal Porcessing and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2005/9331/0/01521458",
"title": "Separable bilateral filtering for fast video preprocessing",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2005/01521458/12OmNBlFR2b",
"parentPublication": {
"id": "proceedings/icme/2005/9331/0",
"title": "2005 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2017/2937/0/2937a014",
"title": "Occlusion Robust Light Field Depth Estimation Using Segmentation Guided Bilateral Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a014/12OmNqGA5iK",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a187",
"title": "Adaptive Bilateral Filter Considering Local Characteristics",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a187/12OmNs5rkVy",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2006/9753/0/04042296",
"title": "Bilateral Filtering of Diffusion Tensor MR Images",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2006/04042296/12OmNxwncfI",
"parentPublication": {
"id": "proceedings/isspit/2006/9753/0",
"title": "2006 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a868",
"title": "Bilateral Filtering Based User-Controllable Multi-exemplars Texture Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a868/12OmNzXnNoy",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460152",
"title": "An edge-preserving filtering framework for visibility restoration",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460152/12OmNzxgHs4",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486543",
"title": "Pointwise Shape-Adaptive Texture Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486543/14jQfP4XL28",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2018/5321/0/08499063",
"title": "Content-Aware Bilateral Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2018/08499063/17D45WrVgeY",
"parentPublication": {
"id": "proceedings/bigmm/2018/5321/0",
"title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1hQqfuoOyHu",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hVlvA8iZ9K",
"doi": "10.1109/ICCV.2019.00585",
"title": "Saliency-Guided Attention Network for Image-Sentence Matching",
"normalizedTitle": "Saliency-Guided Attention Network for Image-Sentence Matching",
"abstract": "This paper studies the task of matching image and sentence, where learning appropriate representations to bridge the semantic gap between image contents and language appears to be the main challenge. Unlike previous approaches that predominantly deploy symmetrical architecture to represent both modalities, we introduce a Saliency-guided Attention Network (SAN) that is characterized by building an asymmetrical link between vision and language to efficiently learn a fine-grained cross-modal correlation. The proposed SAN mainly includes three components: saliency detector, Saliency-weighted Visual Attention (SVA) module, and Saliency-guided Textual Attention (STA) module. Concretely, the saliency detector provides the visual saliency information to drive both two attention modules. Taking advantage of the saliency information, SVA is able to learn more discriminative visual features. By fusing the visual information from SVA and intra-modal information as a multi-modal guidance, STA affords us powerful textual representations that are synchronized with visual clues. Extensive experiments demonstrate SAN can improve the state-of-the-art results on the benchmark Flickr30K and MSCOCO datasets by a large margin.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper studies the task of matching image and sentence, where learning appropriate representations to bridge the semantic gap between image contents and language appears to be the main challenge. Unlike previous approaches that predominantly deploy symmetrical architecture to represent both modalities, we introduce a Saliency-guided Attention Network (SAN) that is characterized by building an asymmetrical link between vision and language to efficiently learn a fine-grained cross-modal correlation. The proposed SAN mainly includes three components: saliency detector, Saliency-weighted Visual Attention (SVA) module, and Saliency-guided Textual Attention (STA) module. Concretely, the saliency detector provides the visual saliency information to drive both two attention modules. Taking advantage of the saliency information, SVA is able to learn more discriminative visual features. By fusing the visual information from SVA and intra-modal information as a multi-modal guidance, STA affords us powerful textual representations that are synchronized with visual clues. Extensive experiments demonstrate SAN can improve the state-of-the-art results on the benchmark Flickr30K and MSCOCO datasets by a large margin.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper studies the task of matching image and sentence, where learning appropriate representations to bridge the semantic gap between image contents and language appears to be the main challenge. Unlike previous approaches that predominantly deploy symmetrical architecture to represent both modalities, we introduce a Saliency-guided Attention Network (SAN) that is characterized by building an asymmetrical link between vision and language to efficiently learn a fine-grained cross-modal correlation. The proposed SAN mainly includes three components: saliency detector, Saliency-weighted Visual Attention (SVA) module, and Saliency-guided Textual Attention (STA) module. Concretely, the saliency detector provides the visual saliency information to drive both two attention modules. Taking advantage of the saliency information, SVA is able to learn more discriminative visual features. By fusing the visual information from SVA and intra-modal information as a multi-modal guidance, STA affords us powerful textual representations that are synchronized with visual clues. Extensive experiments demonstrate SAN can improve the state-of-the-art results on the benchmark Flickr30K and MSCOCO datasets by a large margin.",
"fno": "480300f753",
"keywords": [
"Image Fusion",
"Image Matching",
"Image Representation",
"Image Retrieval",
"Learning Artificial Intelligence",
"Natural Language Processing",
"Object Detection",
"Recurrent Neural Nets",
"Image Sentence Matching",
"Representation Learning",
"Saliency Guided Attention Network",
"SAN",
"Fine Grained Cross Modal Correlation",
"Saliency Detector",
"SVA",
"Visual Saliency Information",
"Visual Information Fusion",
"Intra Modal Information",
"STA",
"Saliency Guided Textual Attention",
"Saliency Weighted Visual Attention",
"Flickr 30 K Dataset",
"MSCOCO Dataset",
"Visualization",
"Semantics",
"Saliency Detection",
"Task Analysis",
"Correlation",
"Hafnium",
"Computer Architecture"
],
"authors": [
{
"affiliation": "Tianjin University",
"fullName": "Zhong Ji",
"givenName": "Zhong",
"surname": "Ji",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin University",
"fullName": "Haoran Wang",
"givenName": "Haoran",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lancaster University",
"fullName": "Jungong Han",
"givenName": "Jungong",
"surname": "Han",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin University",
"fullName": "Yanwei Pang",
"givenName": "Yanwei",
"surname": "Pang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "5753-5762",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4803-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "480300f743",
"articleId": "1hQqjk13CBW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "480300f763",
"articleId": "1hQqu3rVib6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sitis/2011/4635/0/4635a298",
"title": "A Novel Visual Saliency Model for Surveillance Video Compression",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2011/4635a298/12OmNCbU3bW",
"parentPublication": {
"id": "proceedings/sitis/2011/4635/0",
"title": "2011 Seventh International Conference on Signal Image Technology & Internet-Based Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2011/4517/0/4517a240",
"title": "Visual Attention Model with Cross-Layer Saliency Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2011/4517a240/12OmNyuPL6f",
"parentPublication": {
"id": "proceedings/iih-msp/2011/4517/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2013/03/mmu2013030013",
"title": "Partial-Duplicate Image Retrieval via Saliency-Guided Visual Matching",
"doi": null,
"abstractUrl": "/magazine/mu/2013/03/mmu2013030013/13rRUy08MBc",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/12/08444709",
"title": "Personalized Saliency and Its Prediction",
"doi": null,
"abstractUrl": "/journal/tp/2019/12/08444709/13rRUy3gn8N",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2018/5321/0/08499257",
"title": "Saliency-Based Spatiotemporal Attention for Video Captioning",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2018/08499257/17D45WLdYRP",
"parentPublication": {
"id": "proceedings/bigmm/2018/5321/0",
"title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545545",
"title": "Saliency Detection using Iterative Dynamic Guided Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545545/17D45Xq6dzG",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a066",
"title": "Saliency-Guided Image Style Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a066/1cJ0zw9Ceru",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h253",
"title": "Depth-Induced Multi-Scale Recurrent Attention Network for Saliency Detection",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h253/1hQqfKeF3CE",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a434",
"title": "Visual Saliency Detection guided by Neural Signals",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a434/1kecIxRy5nW",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800n3753",
"title": "Learning Selective Self-Mutual Attention for RGB-D Saliency Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800n3753/1m3nAce3Tm8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwp74rq",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "1993",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBTJIJ2",
"doi": "10.1109/CVPR.1993.341033",
"title": "Multiscale relaxation labeling of fractal images",
"normalizedTitle": "Multiscale relaxation labeling of fractal images",
"abstract": "Multiscale relaxation labeling for segmentation of fractal images is described. The images used are of pavement distress, on which simple edge detection schemes perform poorly. Relaxation labeling is used to improve upon initial edge-based segmentation. A multiscale relaxation technique is used in a pavement distress detection system. To better model pixel interactions, nonlinear terms are included in the relaxation process. Symmetry arguments and careful engineering allow a 93% reduction in the complexity of this approach. To demonstrate the necessity of the multiscale approach, examples with and without multiscale relaxation are shown. It is found that performance is greatly improved by multiscale relaxation.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Multiscale relaxation labeling for segmentation of fractal images is described. The images used are of pavement distress, on which simple edge detection schemes perform poorly. Relaxation labeling is used to improve upon initial edge-based segmentation. A multiscale relaxation technique is used in a pavement distress detection system. To better model pixel interactions, nonlinear terms are included in the relaxation process. Symmetry arguments and careful engineering allow a 93% reduction in the complexity of this approach. To demonstrate the necessity of the multiscale approach, examples with and without multiscale relaxation are shown. It is found that performance is greatly improved by multiscale relaxation.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Multiscale relaxation labeling for segmentation of fractal images is described. The images used are of pavement distress, on which simple edge detection schemes perform poorly. Relaxation labeling is used to improve upon initial edge-based segmentation. A multiscale relaxation technique is used in a pavement distress detection system. To better model pixel interactions, nonlinear terms are included in the relaxation process. Symmetry arguments and careful engineering allow a 93% reduction in the complexity of this approach. To demonstrate the necessity of the multiscale approach, examples with and without multiscale relaxation are shown. It is found that performance is greatly improved by multiscale relaxation.",
"fno": "00341033",
"keywords": [
"Image Segmentation",
"Fractals",
"Image Recognition",
"Fractal Images",
"Relaxation Labeling",
"Segmentation",
"Pavement Distress Detection System",
"Pixel Interactions",
"Complexity",
"Multiscale Relaxation",
"Labeling",
"Fractals",
"Image Segmentation",
"Image Edge Detection",
"Aggregates",
"Computer Science",
"Tellurium",
"Pixel",
"Detectors",
"Image Resolution"
],
"authors": [
{
"affiliation": "Xyplex Corp., Boxborough, MA, USA",
"fullName": "J.A. Choate",
"givenName": "J.A.",
"surname": "Choate",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "M.A. Gennert",
"givenName": "M.A.",
"surname": "Gennert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1993-01-01T00:00:00",
"pubType": "proceedings",
"pages": "674-675",
"year": "1993",
"issn": "1063-6919",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00341032",
"articleId": "12OmNy3Agup",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00341034",
"articleId": "12OmNBhpS1F",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ipps/1991/9167/0/00153758",
"title": "Generalized formulation and hypercube algorithms for relaxation labeling",
"doi": null,
"abstractUrl": "/proceedings-article/ipps/1991/00153758/12OmNxVDuLG",
"parentPublication": {
"id": "proceedings/ipps/1991/9167/0",
"title": "Parallel Processing Symposium, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1996/3192/6/00550609",
"title": "A unification of relaxation labeling and associative memory",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1996/00550609/12OmNyU63s1",
"parentPublication": {
"id": "proceedings/icassp/1996/3192/6",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vbc/1990/2039/0/00109345",
"title": "Biomedical image segmentation using multiscale orientation fields",
"doi": null,
"abstractUrl": "/proceedings-article/vbc/1990/00109345/12OmNzIUfT5",
"parentPublication": {
"id": "proceedings/vbc/1990/2039/0",
"title": "[1990] Proceedings of the First Conference on Visualization in Biomedical Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1986/02/04767779",
"title": "Relaxation Labeling with Learning Automata",
"doi": null,
"abstractUrl": "/journal/tp/1986/02/04767779/13rRUwI5TRX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/07/07494684",
"title": "LP Relaxation of the Potts Labeling Problem Is as Hard as Any Linear Program",
"doi": null,
"abstractUrl": "/journal/tp/2017/07/07494684/13rRUwInvKN",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1984/03/04767530",
"title": "Adaptive Relaxation Labeling",
"doi": null,
"abstractUrl": "/journal/tp/1984/03/04767530/13rRUwInvlK",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1982/06/04767325",
"title": "Augmented Relaxation Labeling and Dynamic Relaxation Labeling",
"doi": null,
"abstractUrl": "/journal/tp/1982/06/04767325/13rRUwgQpru",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1990/02/i0165",
"title": "Edge-Labeling Using Dictionary-Based Relaxation",
"doi": null,
"abstractUrl": "/journal/tp/1990/02/i0165/13rRUxAASUa",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1997/09/i0949",
"title": "Relaxation Methods for Supervised Image Segmentation",
"doi": null,
"abstractUrl": "/journal/tp/1997/09/i0949/13rRUxCitzk",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1994/05/i0538",
"title": "Analysis of Stochastic Automata Algorithm for Relaxation Labeling",
"doi": null,
"abstractUrl": "/journal/tp/1994/05/i0538/13rRUyYjK3D",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrkjVbZ",
"title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBTawwj",
"doi": "10.1109/WACV.2015.147",
"title": "Semantic Instance Labeling Leveraging Hierarchical Segmentation",
"normalizedTitle": "Semantic Instance Labeling Leveraging Hierarchical Segmentation",
"abstract": "Most of the approaches for indoor RGBD semantic labeling focus on using pixels or super pixels to train a classifier. In this paper, we implement a higher level segmentation using a hierarchy of super pixels to obtain a better segmentation for training our classifier. By focusing on meaningful segments that conform more directly to objects, regardless of size, we train a random forest of decision trees as a classifier using simple features such as the 3D size, LAB color histogram, width, height, and shape as specified by a histogram of surface normal's. We test our method on the NYU V2 depth dataset, a challenging dataset of cluttered indoor environments. Our experiments using the NYU V2 depth dataset show that our method achieves state of the art results on both a general semantic labeling introduced by the dataset (floor, structure, furniture, and objects) and a more object specific semantic labeling. We show that training a classifier on a segmentation from a hierarchy of super pixels yields better results than training directly on super pixels, patches, or pixels as in previous work.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Most of the approaches for indoor RGBD semantic labeling focus on using pixels or super pixels to train a classifier. In this paper, we implement a higher level segmentation using a hierarchy of super pixels to obtain a better segmentation for training our classifier. By focusing on meaningful segments that conform more directly to objects, regardless of size, we train a random forest of decision trees as a classifier using simple features such as the 3D size, LAB color histogram, width, height, and shape as specified by a histogram of surface normal's. We test our method on the NYU V2 depth dataset, a challenging dataset of cluttered indoor environments. Our experiments using the NYU V2 depth dataset show that our method achieves state of the art results on both a general semantic labeling introduced by the dataset (floor, structure, furniture, and objects) and a more object specific semantic labeling. We show that training a classifier on a segmentation from a hierarchy of super pixels yields better results than training directly on super pixels, patches, or pixels as in previous work.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Most of the approaches for indoor RGBD semantic labeling focus on using pixels or super pixels to train a classifier. In this paper, we implement a higher level segmentation using a hierarchy of super pixels to obtain a better segmentation for training our classifier. By focusing on meaningful segments that conform more directly to objects, regardless of size, we train a random forest of decision trees as a classifier using simple features such as the 3D size, LAB color histogram, width, height, and shape as specified by a histogram of surface normal's. We test our method on the NYU V2 depth dataset, a challenging dataset of cluttered indoor environments. Our experiments using the NYU V2 depth dataset show that our method achieves state of the art results on both a general semantic labeling introduced by the dataset (floor, structure, furniture, and objects) and a more object specific semantic labeling. We show that training a classifier on a segmentation from a hierarchy of super pixels yields better results than training directly on super pixels, patches, or pixels as in previous work.",
"fno": "6683b068",
"keywords": [
"Three Dimensional Displays",
"Labeling",
"Semantics",
"Histograms",
"Accuracy",
"Tin",
"Shape"
],
"authors": [
{
"affiliation": null,
"fullName": "Steven Hickson",
"givenName": "Steven",
"surname": "Hickson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Irfan Essa",
"givenName": "Irfan",
"surname": "Essa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Henrik Christensen",
"givenName": "Henrik",
"surname": "Christensen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-01-01T00:00:00",
"pubType": "proceedings",
"pages": "1068-1075",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-6683-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6683b060",
"articleId": "12OmNBRsVv6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6683b076",
"articleId": "12OmNBCqbE5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdar/2011/4520/0/4520a977",
"title": "CalliGUI: Interactive Labeling of Calligraphic Character Images",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2011/4520a977/12OmNqJq4B9",
"parentPublication": {
"id": "proceedings/icdar/2011/4520/0",
"title": "2011 International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/349P3A38",
"title": "RGB-(D) scene labeling: Features and algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/349P3A38/12OmNrHB1WS",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031a583",
"title": "Chinese Semantic Role Labeling with Hierarchical Semantic Knowledge",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031a583/12OmNx57HOl",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2017/0733/0/0733a203",
"title": "RGB-D Scene Labeling with Multimodal Recurrent Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733a203/12OmNx9FhTz",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/1/7000a569",
"title": "Height Gradient Histogram (HIGH) for 3D Scene Labeling",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a569/12OmNxvO00R",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601b577",
"title": "Instance-Aware Detailed Action Labeling in Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b577/12OmNyXMQc6",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/08/ttp2013081915",
"title": "Learning Hierarchical Features for Scene Labeling",
"doi": null,
"abstractUrl": "/journal/tp/2013/08/ttp2013081915/13rRUxBJhnR",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/07/07516658",
"title": "Tree-Structured Models for Efficient Multi-Cue Scene Labeling",
"doi": null,
"abstractUrl": "/journal/tp/2017/07/07516658/13rRUxBa5yN",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/06/06587713",
"title": "Associative Hierarchical Random Fields",
"doi": null,
"abstractUrl": "/journal/tp/2014/06/06587713/13rRUyuegie",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08590720",
"title": "Semantic Labeling and Instance Segmentation of 3D Point Clouds Using Patch Context Analysis and Multiscale Processing",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08590720/17D45WrVgaC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwdbV00",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrHB1WS",
"doi": "10.1109/CVPR.2012.6247999",
"title": "RGB-(D) scene labeling: Features and algorithms",
"normalizedTitle": "RGB-(D) scene labeling: Features and algorithms",
"abstract": "Scene labeling research has mostly focused on outdoor scenes, leaving the harder case of indoor scenes poorly understood. Microsoft Kinect dramatically changed the landscape, showing great potentials for RGB-D perception (color+depth). Our main objective is to empirically understand the promises and challenges of scene labeling with RGB-D. We use the NYU Depth Dataset as collected and analyzed by Silberman and Fergus [30]. For RGB-D features, we adapt the framework of kernel descriptors that converts local similarities (kernels) to patch descriptors. For contextual modeling, we combine two lines of approaches, one using a superpixel MRF, and the other using a segmentation tree. We find that (1) kernel descriptors are very effective in capturing appearance (RGB) and shape (D) similarities; (2) both superpixel MRF and segmentation tree are useful in modeling context; and (3) the key to labeling accuracy is the ability to efficiently train and test with large-scale data. We improve labeling accuracy on the NYU Dataset from 56.6% to 76.1%. We also apply our approach to image-only scene labeling and improve the accuracy on the Stanford Background Dataset from 79.4% to 82.9%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Scene labeling research has mostly focused on outdoor scenes, leaving the harder case of indoor scenes poorly understood. Microsoft Kinect dramatically changed the landscape, showing great potentials for RGB-D perception (color+depth). Our main objective is to empirically understand the promises and challenges of scene labeling with RGB-D. We use the NYU Depth Dataset as collected and analyzed by Silberman and Fergus [30]. For RGB-D features, we adapt the framework of kernel descriptors that converts local similarities (kernels) to patch descriptors. For contextual modeling, we combine two lines of approaches, one using a superpixel MRF, and the other using a segmentation tree. We find that (1) kernel descriptors are very effective in capturing appearance (RGB) and shape (D) similarities; (2) both superpixel MRF and segmentation tree are useful in modeling context; and (3) the key to labeling accuracy is the ability to efficiently train and test with large-scale data. We improve labeling accuracy on the NYU Dataset from 56.6% to 76.1%. We also apply our approach to image-only scene labeling and improve the accuracy on the Stanford Background Dataset from 79.4% to 82.9%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Scene labeling research has mostly focused on outdoor scenes, leaving the harder case of indoor scenes poorly understood. Microsoft Kinect dramatically changed the landscape, showing great potentials for RGB-D perception (color+depth). Our main objective is to empirically understand the promises and challenges of scene labeling with RGB-D. We use the NYU Depth Dataset as collected and analyzed by Silberman and Fergus [30]. For RGB-D features, we adapt the framework of kernel descriptors that converts local similarities (kernels) to patch descriptors. For contextual modeling, we combine two lines of approaches, one using a superpixel MRF, and the other using a segmentation tree. We find that (1) kernel descriptors are very effective in capturing appearance (RGB) and shape (D) similarities; (2) both superpixel MRF and segmentation tree are useful in modeling context; and (3) the key to labeling accuracy is the ability to efficiently train and test with large-scale data. We improve labeling accuracy on the NYU Dataset from 56.6% to 76.1%. We also apply our approach to image-only scene labeling and improve the accuracy on the Stanford Background Dataset from 79.4% to 82.9%.",
"fno": "349P3A38",
"keywords": [
"Trees Mathematics",
"Image Colour Analysis",
"Image Segmentation",
"Segmentation Tree",
"RGB D Scene Labeling",
"Scene Labeling Research",
"Outdoor Scenes",
"Indoor Scenes",
"Microsoft Kinect",
"RGB D Perception",
"RGB D Features",
"Kernel Descriptors",
"Local Similarities",
"Patch Descriptors",
"Contextual Modeling",
"Superpixel MRF",
"Labeling",
"Kernel",
"Accuracy",
"Vegetation",
"Image Color Analysis",
"Context Modeling",
"Image Segmentation"
],
"authors": [
{
"affiliation": "Comput. Sci. & Eng., Univ. of Washington, Seattle, WA, USA",
"fullName": "D. Fox",
"givenName": "D.",
"surname": "Fox",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Comput. Sci. & Eng., Univ. of Washington, Seattle, WA, USA",
"fullName": "Liefeng Bo",
"givenName": null,
"surname": "Liefeng Bo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaofeng Ren",
"givenName": null,
"surname": "Xiaofeng Ren",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2759-2766",
"year": "2012",
"issn": "1063-6919",
"isbn": "978-1-4673-1226-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "348P3A37",
"articleId": "12OmNB7LvD2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "350P3A39",
"articleId": "12OmNB6UIb0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dicta/2011/4588/0/4588a494",
"title": "A Simple and Practical Solution to the Rigid Body Motion Segmentation Problem Using a RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2011/4588a494/12OmNBtCCEL",
"parentPublication": {
"id": "proceedings/dicta/2011/4588/0",
"title": "2011 International Conference on Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2012/4683/0/4683a024",
"title": "Adaptive RGB-D Localization",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2012/4683a024/12OmNrIrPu5",
"parentPublication": {
"id": "proceedings/crv/2012/4683/0",
"title": "2012 Ninth Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2013/4983/0/4983a023",
"title": "3D Disaster Scene Reconstruction Using a Canine-Mounted RGB-D Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2013/4983a023/12OmNvA1hpf",
"parentPublication": {
"id": "proceedings/crv/2013/4983/0",
"title": "2013 International Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2017/0733/0/0733a203",
"title": "RGB-D Scene Labeling with Multimodal Recurrent Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733a203/12OmNx9FhTz",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995719",
"title": "Object recognition with hierarchical kernel descriptors",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995719/12OmNxVDuQc",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/1/7000a569",
"title": "Height Gradient Histogram (HIGH) for 3D Scene Labeling",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a569/12OmNxvO00R",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07298655",
"title": "SUN RGB-D: A RGB-D scene understanding benchmark suite",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298655/12OmNyGbIiB",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300l1828",
"title": "Translate-to-Recognize Networks for RGB-D Scene Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300l1828/1gyrTBK9Ixq",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i579",
"title": "UC-Net: Uncertainty Inspired RGB-D Saliency Detection via Conditional Variational Autoencoders",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i579/1m3nhGh6i1q",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/09/09405467",
"title": "Uncertainty Inspired RGB-D Saliency Detection",
"doi": null,
"abstractUrl": "/journal/tp/2022/09/09405467/1sP15MuA1Hi",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNylborE",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyXMQc6",
"doi": "10.1109/WACV.2018.00175",
"title": "Instance-Aware Detailed Action Labeling in Videos",
"normalizedTitle": "Instance-Aware Detailed Action Labeling in Videos",
"abstract": "We address the problem of detailed sequence labeling of complex activities in videos, which aims to assign an action label to every frame. Previous work typically focus on predicting action class labels for each frame in a sequence without reasoning action instances. However, such category-level labeling is inefficient in encoding the global constraints at the action instance level and tends to produce inconsistent results. In this work we consider a fusion approach that exploits the synergy between action detection and sequence labeling for complex activities. To this end, we propose an instance-aware sequence labeling method that utilizes the cues from action instance detection. In particular, we design an LSTM-based fusion network that integrates framewise action labeling and action instance prediction to produce a final consistent labeling. To evaluate our method, we create a large-scale RGBD video dataset on gym activities for sequence labeling and action detection called GADD. The experimental results on GADD dataset show that our method outperforms all the state-of-the-art methods consistently in terms of labeling accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We address the problem of detailed sequence labeling of complex activities in videos, which aims to assign an action label to every frame. Previous work typically focus on predicting action class labels for each frame in a sequence without reasoning action instances. However, such category-level labeling is inefficient in encoding the global constraints at the action instance level and tends to produce inconsistent results. In this work we consider a fusion approach that exploits the synergy between action detection and sequence labeling for complex activities. To this end, we propose an instance-aware sequence labeling method that utilizes the cues from action instance detection. In particular, we design an LSTM-based fusion network that integrates framewise action labeling and action instance prediction to produce a final consistent labeling. To evaluate our method, we create a large-scale RGBD video dataset on gym activities for sequence labeling and action detection called GADD. The experimental results on GADD dataset show that our method outperforms all the state-of-the-art methods consistently in terms of labeling accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We address the problem of detailed sequence labeling of complex activities in videos, which aims to assign an action label to every frame. Previous work typically focus on predicting action class labels for each frame in a sequence without reasoning action instances. However, such category-level labeling is inefficient in encoding the global constraints at the action instance level and tends to produce inconsistent results. In this work we consider a fusion approach that exploits the synergy between action detection and sequence labeling for complex activities. To this end, we propose an instance-aware sequence labeling method that utilizes the cues from action instance detection. In particular, we design an LSTM-based fusion network that integrates framewise action labeling and action instance prediction to produce a final consistent labeling. To evaluate our method, we create a large-scale RGBD video dataset on gym activities for sequence labeling and action detection called GADD. The experimental results on GADD dataset show that our method outperforms all the state-of-the-art methods consistently in terms of labeling accuracy.",
"fno": "488601b577",
"keywords": [
"Feature Extraction",
"Image Colour Analysis",
"Image Fusion",
"Learning Artificial Intelligence",
"Object Detection",
"Video Signal Processing",
"Action Instance Detection",
"Framewise Action Labeling",
"Action Instance Prediction",
"Final Consistent Labeling",
"Video Dataset",
"Action Detection",
"Labeling Accuracy",
"Videos",
"Detailed Sequence Labeling",
"Complex Activities",
"Action Class Labels",
"Reasoning Action Instances",
"Category Level Labeling",
"Action Instance Level",
"Instance Aware Sequence Labeling Method",
"Labeling",
"Videos",
"Task Analysis",
"Feature Extraction",
"Dynamics",
"Three Dimensional Displays",
"Proposals"
],
"authors": [
{
"affiliation": null,
"fullName": "Hongtao Yang",
"givenName": "Hongtao",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xuming He",
"givenName": "Xuming",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Fatih Porikli",
"givenName": "Fatih",
"surname": "Porikli",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1577-1586",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-4886-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "488601b625",
"articleId": "12OmNqI04HX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "488601b633",
"articleId": "12OmNweBUIV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2008/2174/0/04761187",
"title": "Interactive labeling of facial action units",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761187/12OmNBhZ4li",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851b049",
"title": "Temporal Action Localization in Untrimmed Videos via Multi-stage CNNs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851b049/12OmNy9PrlD",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457b417",
"title": "CDC: Convolutional-De-Convolutional Networks for Precise Temporal Action Localization in Untrimmed Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b417/12OmNylKB49",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032c176",
"title": "Common Action Discovery and Localization in Unconstrained Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c176/12OmNzZWbPg",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2012/03/06042884",
"title": "The Action Similarity Labeling Challenge",
"doi": null,
"abstractUrl": "/journal/tp/2012/03/06042884/13rRUxcbnDK",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2019/1392/0/139200a038",
"title": "Fine-grained Action Detection in Untrimmed Surveillance Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2019/139200a038/18jXpEwKeyY",
"parentPublication": {
"id": "proceedings/wacvw/2019/1392/0",
"title": "2019 IEEE Winter Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300j907",
"title": "Action Recognition From Single Timestamp Supervision in Untrimmed Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300j907/1gyrccgMLjG",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300a061",
"title": "Hierarchical Self-Attention Network for Action Localization in Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300a061/1hQqogzyv9S",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300g063",
"title": "Object-Aware Instance Labeling for Weakly Supervised Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300g063/1hQqud7Zj9u",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300f541",
"title": "StartNet: Online Detection of Action Start in Untrimmed Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300f541/1hVllJRiwHS",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cdOEoawzMQ",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cdOTDqbIha",
"doi": "10.1109/ICME.2019.00274",
"title": "Active Semantic Labeling of Street View Point Clouds",
"normalizedTitle": "Active Semantic Labeling of Street View Point Clouds",
"abstract": "Semantic 3D models have shown their importance in many fields such as autonomous driving. However, it remains a tough task to assign semantic labels to various scenes. In this paper, we propose an Active Learning based method for semantic labeling of street view point clouds with a small amount of annotated data samples. The proposed method takes a point cloud and registrated images as the input, and yields a point cloud with semantic labels. We iteratively fine-tunes a network with the ever-enlarging training set to exploit the semantic information of the scene, and fuse the semantic labels in 3D space. To deal with the imbalanced data in street view scenes, a label biased criterion for query selection is proposed to help select images to efficiently improve the performance of the network and the quality of the semantic model. Experimental result shows that the proposed method demands limited human labor and works well in assigning semantic labels to the imbalanced scenes like street view scenes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Semantic 3D models have shown their importance in many fields such as autonomous driving. However, it remains a tough task to assign semantic labels to various scenes. In this paper, we propose an Active Learning based method for semantic labeling of street view point clouds with a small amount of annotated data samples. The proposed method takes a point cloud and registrated images as the input, and yields a point cloud with semantic labels. We iteratively fine-tunes a network with the ever-enlarging training set to exploit the semantic information of the scene, and fuse the semantic labels in 3D space. To deal with the imbalanced data in street view scenes, a label biased criterion for query selection is proposed to help select images to efficiently improve the performance of the network and the quality of the semantic model. Experimental result shows that the proposed method demands limited human labor and works well in assigning semantic labels to the imbalanced scenes like street view scenes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Semantic 3D models have shown their importance in many fields such as autonomous driving. However, it remains a tough task to assign semantic labels to various scenes. In this paper, we propose an Active Learning based method for semantic labeling of street view point clouds with a small amount of annotated data samples. The proposed method takes a point cloud and registrated images as the input, and yields a point cloud with semantic labels. We iteratively fine-tunes a network with the ever-enlarging training set to exploit the semantic information of the scene, and fuse the semantic labels in 3D space. To deal with the imbalanced data in street view scenes, a label biased criterion for query selection is proposed to help select images to efficiently improve the performance of the network and the quality of the semantic model. Experimental result shows that the proposed method demands limited human labor and works well in assigning semantic labels to the imbalanced scenes like street view scenes.",
"fno": "955200b588",
"keywords": [
"Image Annotation",
"Image Fusion",
"Image Registration",
"Intelligent Transportation Systems",
"Learning Artificial Intelligence",
"Query Processing",
"Solid Modelling",
"Street View Point Clouds",
"Semantic 3 D Models",
"Semantic Information",
"Street View Scenes",
"Semantic Model",
"Active Semantic Labeling",
"Semantic Label Assignment",
"Active Learning Based Method",
"Annotated Data Samples",
"Image Registration",
"Semantic Lable Fusion",
"3 D Space",
"Imbalanced Data",
"Label Biased Criterion",
"Query Selection",
"Performance Improvement",
"Imbalanced Scenes",
"Autonomous Driving",
"Three Dimensional Displays",
"Semantics",
"Solid Modeling",
"Labeling",
"Image Segmentation",
"Training",
"Task Analysis",
"Semantic",
"Street View",
"Active Learning"
],
"authors": [
{
"affiliation": "Chinese Academy of Sciences; University of Chinese Academy of Sciences",
"fullName": "Yang Zhou",
"givenName": "Yang",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chinese Academy of Sciences; University of Chinese Academy of Sciences",
"fullName": "Shuhan Shen",
"givenName": "Shuhan",
"surname": "Shen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chinese Academy of Sciences; University of Chinese Academy of Sciences",
"fullName": "Zhanyi Hu",
"givenName": "Zhanyi",
"surname": "Hu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1588-1593",
"year": "2019",
"issn": null,
"isbn": "978-1-5386-9552-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "955200b582",
"articleId": "1cdON132Ocg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "955200b594",
"articleId": "1cdOGB3IQXS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2009/4442/0/05457645",
"title": "Semantic segmentation of street scenes by superpixel co-occurrence and 3D geometry",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457645/12OmNB8kHUM",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457f583",
"title": "Semantically Coherent Co-Segmentation and Reconstruction of Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f583/12OmNweBUJH",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d688",
"title": "Semantic Instance Annotation of Street Scenes by 3D to 2D Label Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d688/12OmNy2ah1y",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a706",
"title": "Multi-view Stereo with Single-View Semantic Mesh Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a706/12OmNy49sQd",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545378",
"title": "3D Geometry-Aware Semantic Labeling of Outdoor Street Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545378/17D45VtKiwd",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a523",
"title": "Fine-Level Semantic Labeling of Large-Scale 3D Model by Active Learning",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a523/17D45WHONjT",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08590720",
"title": "Semantic Labeling and Instance Segmentation of 3D Point Clouds Using Patch Context Analysis and Multiscale Processing",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08590720/17D45WrVgaC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ainit/2021/1296/0/129600a400",
"title": "Semantic Segmentation of Street View Image Based on Fully Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ainit/2021/129600a400/1BzWGZ3Ac7K",
"parentPublication": {
"id": "proceedings/ainit/2021/1296/0",
"title": "2021 2nd International Seminar on Artificial Intelligence, Networking and Information Technology (AINIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-&-icivpr/2020/9331/0/09306512",
"title": "Weakly Supervised Semantic Roadside Object Segmentation Using Digital Maps",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-&-icivpr/2020/09306512/1qciedYNVFm",
"parentPublication": {
"id": "proceedings/iciev-&-icivpr/2020/9331/0",
"title": "2020 Joint 9th International Conference on Informatics, Electronics & Vision (ICIEV) and 2020 4th International Conference on Imaging, Vision & Pattern Recognition (icIVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412358",
"title": "3D Semantic Labeling of Photogrammetry Meshes Based on Active Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412358/1tmk0dm5gty",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1i5mkDyiIUg",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1i5mGbOH8oE",
"doi": "10.1109/ICCVW.2019.00505",
"title": "Rethinking Task and Metrics of Instance Segmentation on 3D Point Clouds",
"normalizedTitle": "Rethinking Task and Metrics of Instance Segmentation on 3D Point Clouds",
"abstract": "Instance segmentation on 3D point clouds is one of the most extensively researched areas toward the realization of autonomous cars and robots. Certain existing studies have split input point clouds into small regions such as 1m×1m; one reason for this is that models in the studies cannot consume a large number of points because of the large space complexity. However, because such small regions occasionally include a very small number of instances belonging to the same class, an evaluation using existing metrics such as mAP is largely affected by the category recognition performance. To address these problems, we propose a new method with space complexity O(Np) such that large regions can be consumed, as well as novel metrics for tasks that are independent of the categories or size of the inputs. Our method learns a mapping from input point clouds to an embedding space, where the embeddings form clusters for each instance and distinguish instances using these clusters during testing. Our method achieves state-of-the-art performance using both existing and the proposed metrics. Moreover, we show that our new metric can evaluate the performance of a task without being affected by any other condition.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Instance segmentation on 3D point clouds is one of the most extensively researched areas toward the realization of autonomous cars and robots. Certain existing studies have split input point clouds into small regions such as 1m×1m; one reason for this is that models in the studies cannot consume a large number of points because of the large space complexity. However, because such small regions occasionally include a very small number of instances belonging to the same class, an evaluation using existing metrics such as mAP is largely affected by the category recognition performance. To address these problems, we propose a new method with space complexity O(Np) such that large regions can be consumed, as well as novel metrics for tasks that are independent of the categories or size of the inputs. Our method learns a mapping from input point clouds to an embedding space, where the embeddings form clusters for each instance and distinguish instances using these clusters during testing. Our method achieves state-of-the-art performance using both existing and the proposed metrics. Moreover, we show that our new metric can evaluate the performance of a task without being affected by any other condition.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Instance segmentation on 3D point clouds is one of the most extensively researched areas toward the realization of autonomous cars and robots. Certain existing studies have split input point clouds into small regions such as 1m×1m; one reason for this is that models in the studies cannot consume a large number of points because of the large space complexity. However, because such small regions occasionally include a very small number of instances belonging to the same class, an evaluation using existing metrics such as mAP is largely affected by the category recognition performance. To address these problems, we propose a new method with space complexity O(Np) such that large regions can be consumed, as well as novel metrics for tasks that are independent of the categories or size of the inputs. Our method learns a mapping from input point clouds to an embedding space, where the embeddings form clusters for each instance and distinguish instances using these clusters during testing. Our method achieves state-of-the-art performance using both existing and the proposed metrics. Moreover, we show that our new metric can evaluate the performance of a task without being affected by any other condition.",
"fno": "09022256",
"keywords": [
"Computational Complexity",
"Image Recognition",
"Image Segmentation",
"Pattern Clustering",
"Stereo Image Processing",
"Space Complexity",
"Category Recognition Performance",
"Embedding Space",
"3 D Point Cloud Segmentation",
"Autonomous Cars",
"M AP",
"Three Dimensional Displays",
"Measurement",
"Image Segmentation",
"Task Analysis",
"Semantics",
"Complexity Theory",
"Feature Extraction",
"Machine Learning",
"3 D Point Clouds",
"Instance Segmentation"
],
"authors": [
{
"affiliation": "The University of Tokyo",
"fullName": "Kosuke Arase",
"givenName": "Kosuke",
"surname": "Arase",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo, RIKEN",
"fullName": "Yusuke Mukuta",
"givenName": "Yusuke",
"surname": "Mukuta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo, RIKEN",
"fullName": "Tatsuya Harada",
"givenName": "Tatsuya",
"surname": "Harada",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "4105-4113",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-5023-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09022574",
"articleId": "1i5mBQj8koM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09022145",
"articleId": "1i5mMZbmVMs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2020/07/08590720",
"title": "Semantic Labeling and Instance Segmentation of 3D Point Clouds Using Patch Context Analysis and Multiscale Processing",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08590720/17D45WrVgaC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g098",
"title": "AdaFit: Rethinking Learning-based Normal Estimation on Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g098/1BmF4R3uGWI",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200k0458",
"title": "Point-set Distances for Learning Representations of 3D Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200k0458/1BmIlciDLLW",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2022/5963/0/596300a013",
"title": "Content-Aware Adaptive Point Cloud Delivery",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2022/596300a013/1JvaKlHaUVi",
"parentPublication": {
"id": "proceedings/bigmm/2022/5963/0",
"title": "2022 IEEE Eighth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a551",
"title": "NeuralBF: Neural Bilateral Filtering for Top-down Instance Segmentation on Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a551/1KxUNTuCTkY",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805456",
"title": "LassoNet: Deep Lasso-Selection of 3D Point Clouds",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805456/1cG4x9FpdAI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200b588",
"title": "Active Semantic Labeling of Street View Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200b588/1cdOTDqbIha",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e573",
"title": "SpSequenceNet: Semantic Segmentation Network on 4D Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e573/1m3nZX73Hna",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412532",
"title": "Joint Semantic-Instance Segmentation of 3D Point Clouds: Instance Separation and Semantic Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412532/1tmhqaV5g2s",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci*cc/2020/9594/0/09450255",
"title": "SAPCGAN: Self-Attention based Generative Adversarial Network for Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2020/09450255/1uqFO2IzUIg",
"parentPublication": {
"id": "proceedings/icci*cc/2020/9594/0",
"title": "2020 IEEE 19th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3oqBV286c",
"doi": "10.1109/CVPR42600.2020.00857",
"title": "AdaCoSeg: Adaptive Shape Co-Segmentation With Group Consistency Loss",
"normalizedTitle": "AdaCoSeg: Adaptive Shape Co-Segmentation With Group Consistency Loss",
"abstract": "We introduce AdaCoSeg, a deep neural network architecture for adaptive co-segmentation of a set of 3D shapes represented as point clouds. Differently from the familiar single-instance segmentation problem, co-segmentation is intrinsically contextual: how a shape is segmented can vary depending on the set it is in. Hence, our network features an adaptive learning module to produce a consistent shape segmentation which adapts to a set. Specifically, given an input set of unsegmented shapes, we first employ an offline pre-trained part prior network to propose per-shape parts. Then the co-segmentation network iteratively and jointly optimizes the part labelings across the set subjected to a novel group consistency loss defined by matrix ranks. While the part prior network can be trained with noisy and inconsistently segmented shapes, the final output of AdaSeg is a consistent part labeling for the input set, with each shape segmented into up to (a user-specified) K parts. Overall, our method is weakly supervised, producing segmentations tailored to the test set, without consistent ground-truth segmentations. We show qualitative and quantitative results from AdaSeg and evaluate it via ablation studies and comparisons to state-of-the-art co-segmentation methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce AdaCoSeg, a deep neural network architecture for adaptive co-segmentation of a set of 3D shapes represented as point clouds. Differently from the familiar single-instance segmentation problem, co-segmentation is intrinsically contextual: how a shape is segmented can vary depending on the set it is in. Hence, our network features an adaptive learning module to produce a consistent shape segmentation which adapts to a set. Specifically, given an input set of unsegmented shapes, we first employ an offline pre-trained part prior network to propose per-shape parts. Then the co-segmentation network iteratively and jointly optimizes the part labelings across the set subjected to a novel group consistency loss defined by matrix ranks. While the part prior network can be trained with noisy and inconsistently segmented shapes, the final output of AdaSeg is a consistent part labeling for the input set, with each shape segmented into up to (a user-specified) K parts. Overall, our method is weakly supervised, producing segmentations tailored to the test set, without consistent ground-truth segmentations. We show qualitative and quantitative results from AdaSeg and evaluate it via ablation studies and comparisons to state-of-the-art co-segmentation methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce AdaCoSeg, a deep neural network architecture for adaptive co-segmentation of a set of 3D shapes represented as point clouds. Differently from the familiar single-instance segmentation problem, co-segmentation is intrinsically contextual: how a shape is segmented can vary depending on the set it is in. Hence, our network features an adaptive learning module to produce a consistent shape segmentation which adapts to a set. Specifically, given an input set of unsegmented shapes, we first employ an offline pre-trained part prior network to propose per-shape parts. Then the co-segmentation network iteratively and jointly optimizes the part labelings across the set subjected to a novel group consistency loss defined by matrix ranks. While the part prior network can be trained with noisy and inconsistently segmented shapes, the final output of AdaSeg is a consistent part labeling for the input set, with each shape segmented into up to (a user-specified) K parts. Overall, our method is weakly supervised, producing segmentations tailored to the test set, without consistent ground-truth segmentations. We show qualitative and quantitative results from AdaSeg and evaluate it via ablation studies and comparisons to state-of-the-art co-segmentation methods.",
"fno": "716800i540",
"keywords": [
"Image Segmentation",
"Learning Artificial Intelligence",
"Neural Nets",
"Optimisation",
"Ada Co Seg",
"Deep Neural Network Architecture",
"Adaptive Co Segmentation",
"Point Clouds",
"Adaptive Learning Module",
"Shape Segmentation",
"Input Set",
"Unsegmented Shapes",
"Co Segmentation Network",
"Group Consistency Loss",
"Noisy Shapes",
"Inconsistently Segmented Shapes",
"Test Set",
"Ground Truth Segmentations",
"Adaptive Shape Co Segmentation",
"Part Labeling Optimization",
"Matrix Ranks",
"Pre Trained Part Prior Network",
"Single Instance Segmentation Problem",
"Shape",
"Labeling",
"Three Dimensional Displays",
"Adaptive Systems",
"Noise Measurement",
"Adaptation Models",
"Image Segmentation"
],
"authors": [
{
"affiliation": "Simon Fraser University; National University of Defense Technology",
"fullName": "Chenyang Zhu",
"givenName": "Chenyang",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National University of Defense Technology",
"fullName": "Kai Xu",
"givenName": "Kai",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research; IIT Bombay",
"fullName": "Siddhartha Chaudhuri",
"givenName": "Siddhartha",
"surname": "Chaudhuri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google Research",
"fullName": "Li Yi",
"givenName": "Li",
"surname": "Yi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Stanford University",
"fullName": "Leonidas J. Guibas",
"givenName": "Leonidas J.",
"surname": "Guibas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University",
"fullName": "Hao Zhang",
"givenName": "Hao",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "8540-8549",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800i530",
"articleId": "1m3nquyHjry",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800i550",
"articleId": "1m3nT03buuI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2014/5118/0/5118d182",
"title": "Clothing Co-parsing by Joint Image Segmentation and Labeling",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118d182/12OmNAg7jXy",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118077",
"title": "Shape description from imperfect and incomplete data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118077/12OmNqNXEt9",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118a240",
"title": "Co-segmentation of Textured 3D Shapes with Sparse Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118a240/12OmNvUaNm0",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457f583",
"title": "Semantically Coherent Co-Segmentation and Reconstruction of Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f583/12OmNweBUJH",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840b297",
"title": "Co-segmentation by Composition",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840b297/12OmNxGAKSo",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/his/2009/3745/1/3745a243",
"title": "Shape-Based Level Set Method for Image Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/his/2009/3745a243/12OmNxWcH4M",
"parentPublication": {
"id": "proceedings/his/2009/3745/1",
"title": "Hybrid Intelligent Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851a411",
"title": "MCMC Shape Sampling for Image Segmentation with Nonparametric Shape Priors",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851a411/12OmNy9PrjS",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457d881",
"title": "Object Co-skeletonization with Co-segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d881/12OmNzahcbP",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2018/5321/0/08499435",
"title": "Multiple Kernel Boosting Based Two-level RGBD Image Co-Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2018/08499435/17D45WK5Alz",
"parentPublication": {
"id": "proceedings/bigmm/2018/5321/0",
"title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300i489",
"title": "BAE-NET: Branched Autoencoder for Shape Co-Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300i489/1hVlrqDzCta",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1wzs0vrjyWQ",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yJYqacU9na",
"doi": "10.1109/CVPRW53098.2021.00487",
"title": "Semantic labeling of lidar point clouds for UAV applications",
"normalizedTitle": "Semantic labeling of lidar point clouds for UAV applications",
"abstract": "Small Unmanned Aerial Vehicle (UAV) platforms equipped with compact laser scanners provides a low-cost option for many applications, including surveillance, mapping, and reconnaissance. For these applications, semantic segmentation or semantic labeling of each point in the lidar point cloud, is important for scene-understanding. In this work, we evaluate methods for semantic segmentation of three-dimensional (3D) point clouds of outdoor scenes measured with a laser scanner mounted on a small UAV. We compare the performance of four different semantic segmentation methods, which are all applied in a scan-by-scan fashion, on semi-sparse laser data. The best method achieves 95.3% on the three classes ground, vegetation, and vehicle in terms of mean intersection over union (mIoU) on a previously unseen scene from a different geographical area. The results demonstrate that it is possible to achieve good performance on the semantic segmentation task on data measured using a combination of a small UAV and a compact laser scanner.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Small Unmanned Aerial Vehicle (UAV) platforms equipped with compact laser scanners provides a low-cost option for many applications, including surveillance, mapping, and reconnaissance. For these applications, semantic segmentation or semantic labeling of each point in the lidar point cloud, is important for scene-understanding. In this work, we evaluate methods for semantic segmentation of three-dimensional (3D) point clouds of outdoor scenes measured with a laser scanner mounted on a small UAV. We compare the performance of four different semantic segmentation methods, which are all applied in a scan-by-scan fashion, on semi-sparse laser data. The best method achieves 95.3% on the three classes ground, vegetation, and vehicle in terms of mean intersection over union (mIoU) on a previously unseen scene from a different geographical area. The results demonstrate that it is possible to achieve good performance on the semantic segmentation task on data measured using a combination of a small UAV and a compact laser scanner.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Small Unmanned Aerial Vehicle (UAV) platforms equipped with compact laser scanners provides a low-cost option for many applications, including surveillance, mapping, and reconnaissance. For these applications, semantic segmentation or semantic labeling of each point in the lidar point cloud, is important for scene-understanding. In this work, we evaluate methods for semantic segmentation of three-dimensional (3D) point clouds of outdoor scenes measured with a laser scanner mounted on a small UAV. We compare the performance of four different semantic segmentation methods, which are all applied in a scan-by-scan fashion, on semi-sparse laser data. The best method achieves 95.3% on the three classes ground, vegetation, and vehicle in terms of mean intersection over union (mIoU) on a previously unseen scene from a different geographical area. The results demonstrate that it is possible to achieve good performance on the semantic segmentation task on data measured using a combination of a small UAV and a compact laser scanner.",
"fno": "489900e309",
"keywords": [
"Training",
"Three Dimensional Displays",
"Laser Radar",
"Image Color Analysis",
"Semantics",
"Measurement By Laser Beam",
"Vegetation Mapping"
],
"authors": [
{
"affiliation": "Swedish Defence Research Agency (FOI),Linköping,Sweden",
"fullName": "Maria Axelsson",
"givenName": "Maria",
"surname": "Axelsson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Swedish Defence Research Agency (FOI),Linköping,Sweden",
"fullName": "Max Holmberg",
"givenName": "Max",
"surname": "Holmberg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Swedish Defence Research Agency (FOI),Linköping,Sweden",
"fullName": "Sabina Serra",
"givenName": "Sabina",
"surname": "Serra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Swedish Defence Research Agency (FOI),Linköping,Sweden",
"fullName": "Hannes Ovrén",
"givenName": "Hannes",
"surname": "Ovrén",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Swedish Defence Research Agency (FOI),Linköping,Sweden",
"fullName": "Michael Tulldahl",
"givenName": "Michael",
"surname": "Tulldahl",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4309-4316",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4899-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "489900e304",
"articleId": "1yVzUzT2lpu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "489900e317",
"articleId": "1wzs0TLqn1C",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2013/3022/0/3022a714",
"title": "Semantic Parsing of Street Scene Images Using 3D LiDAR Point Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a714/12OmNBBQZnh",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881424",
"title": "Assessing the Usability of LiDAR Processing Methods on UAV Data",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881424/12OmNvAAtyW",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imccc/2016/1195/0/07774873",
"title": "A New Positioning Method for Indoor Laser Navigation on Under-Determined Condition",
"doi": null,
"abstractUrl": "/proceedings-article/imccc/2016/07774873/12OmNwoxSaK",
"parentPublication": {
"id": "proceedings/imccc/2016/1195/0",
"title": "2016 Sixth International Conference on Instrumentation & Measurement, Computer, Communication and Control (IMCCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itng/2015/8828/0/8828a680",
"title": "An Experiment of Mutual Interference between Automotive LIDAR Scanners",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2015/8828a680/12OmNyLA5Ae",
"parentPublication": {
"id": "proceedings/itng/2015/8828/0",
"title": "2015 12th International Conference on Information Technology - New Generations (ITNG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2009/3994/0/05204114",
"title": "Object recognition in 3D lidar data with recurrent neural network",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2009/05204114/12OmNyUnENw",
"parentPublication": {
"id": "proceedings/cvprw/2009/3994/0",
"title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2018/9184/0/918400a479",
"title": "A Secure LiDAR with AES-Based Side-Channel Fingerprinting",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2018/918400a479/17D45WK5AlB",
"parentPublication": {
"id": "proceedings/candarw/2018/9184/0",
"title": "2018 Sixth International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900e418",
"title": "PointMotionNet: Point-Wise Motion Learning for Large-Scale LiDAR Point Clouds Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900e418/1G563yaYq1q",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2019/9151/0/08730820",
"title": "Road Surface Condition Inspection Using a Laser Scanner Mounted on an Autonomous Driving Car",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2019/08730820/1aDSDZbZcNa",
"parentPublication": {
"id": "proceedings/percom-workshops/2019/9151/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedeg/2019/1704/0/08734372",
"title": "Dasometric Analysis Applying Terrestrial Laser Scanner and Conventional Techniques for the Estimation of Aboveground Biomass in a Forest of the Inter-Andean Valley in Ecuador",
"doi": null,
"abstractUrl": "/proceedings-article/icedeg/2019/08734372/1aPuXsMAheU",
"parentPublication": {
"id": "proceedings/icedeg/2019/1704/0",
"title": "2019 Sixth International Conference on eDemocracy & eGovernment (ICEDEG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150622",
"title": "DALES: A Large-scale Aerial LiDAR Data Set for Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150622/1lPH2XaJcB2",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC8dg91",
"title": "2016 IEEE International Conference on Big Data (Big Data)",
"acronym": "big-data",
"groupId": "1802964",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBtCCyS",
"doi": "10.1109/BigData.2016.7840677",
"title": "TelcoFlow: Visual exploration of collective behaviors based on telco data",
"normalizedTitle": "TelcoFlow: Visual exploration of collective behaviors based on telco data",
"abstract": "Collective behavior is an important concept defined to capture behavioral patterns emerged among the crowd spontaneously. In social science, people's behaviors can be regarded as temporal transitions between a set of typical states (e.g., home and work) which are always associated with certain locations. This fact leads to an interesting research topic in developing ways to explore people's collective behavior patterns through movement analysis, which is our focus in this paper. In recent years, massive volumes of spatiotemporal data generated by mobile phones, called telco data, bring an unprecedented opportunity to study collective behaviors in terms of large coverage and fine-grained resolution. However, distilling valuable collective behavior patterns from the large scale of telco data is not an easy task. The challenge is rooted in two aspects, including the data uncertainty as well as the lack of methods to characterize, compare and understand dynamic crowd behaviors, which triggers the use of visual analytics to take full advantage of machines' computational power as well as human's domain knowledge and cognitive abilities. In this paper, we propose TelcoFlow, a comprehensive visual analytics system which incorporates advanced quantitative analyses (e.g., statebased behavior model) and intuitive visualizations (e.g., an extended flow view embedded with state glyphs) to support an efficient and in-depth analysis of collective behaviors based on telco data. Case studies with a real-world dataset and expert interviews are carried out to demonstrate the effectiveness of our system for analysts to gain insights into collective behaviors and facilitate various analytical tasks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Collective behavior is an important concept defined to capture behavioral patterns emerged among the crowd spontaneously. In social science, people's behaviors can be regarded as temporal transitions between a set of typical states (e.g., home and work) which are always associated with certain locations. This fact leads to an interesting research topic in developing ways to explore people's collective behavior patterns through movement analysis, which is our focus in this paper. In recent years, massive volumes of spatiotemporal data generated by mobile phones, called telco data, bring an unprecedented opportunity to study collective behaviors in terms of large coverage and fine-grained resolution. However, distilling valuable collective behavior patterns from the large scale of telco data is not an easy task. The challenge is rooted in two aspects, including the data uncertainty as well as the lack of methods to characterize, compare and understand dynamic crowd behaviors, which triggers the use of visual analytics to take full advantage of machines' computational power as well as human's domain knowledge and cognitive abilities. In this paper, we propose TelcoFlow, a comprehensive visual analytics system which incorporates advanced quantitative analyses (e.g., statebased behavior model) and intuitive visualizations (e.g., an extended flow view embedded with state glyphs) to support an efficient and in-depth analysis of collective behaviors based on telco data. Case studies with a real-world dataset and expert interviews are carried out to demonstrate the effectiveness of our system for analysts to gain insights into collective behaviors and facilitate various analytical tasks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Collective behavior is an important concept defined to capture behavioral patterns emerged among the crowd spontaneously. In social science, people's behaviors can be regarded as temporal transitions between a set of typical states (e.g., home and work) which are always associated with certain locations. This fact leads to an interesting research topic in developing ways to explore people's collective behavior patterns through movement analysis, which is our focus in this paper. In recent years, massive volumes of spatiotemporal data generated by mobile phones, called telco data, bring an unprecedented opportunity to study collective behaviors in terms of large coverage and fine-grained resolution. However, distilling valuable collective behavior patterns from the large scale of telco data is not an easy task. The challenge is rooted in two aspects, including the data uncertainty as well as the lack of methods to characterize, compare and understand dynamic crowd behaviors, which triggers the use of visual analytics to take full advantage of machines' computational power as well as human's domain knowledge and cognitive abilities. In this paper, we propose TelcoFlow, a comprehensive visual analytics system which incorporates advanced quantitative analyses (e.g., statebased behavior model) and intuitive visualizations (e.g., an extended flow view embedded with state glyphs) to support an efficient and in-depth analysis of collective behaviors based on telco data. Case studies with a real-world dataset and expert interviews are carried out to demonstrate the effectiveness of our system for analysts to gain insights into collective behaviors and facilitate various analytical tasks.",
"fno": "07840677",
"keywords": [
"Behavioural Sciences Computing",
"Cognition",
"Data Visualisation",
"Graphical User Interfaces",
"Smart Phones",
"Spatiotemporal Phenomena",
"Telco Flow",
"Visual Exploration",
"Telco Data",
"Crowd Behavioral Patterns",
"Social Science",
"Temporal Transitions",
"Movement Analysis",
"Spatio Temporal Data",
"Mobile Phones",
"Collective Behavior Patterns",
"Data Uncertainty",
"Dynamic Crowd Behaviors",
"Visual Analytics",
"Machine Computational Power",
"Human Domain Knowledge Abilities",
"Human Cognitive Abilities",
"Quantitative Analysis",
"State Based Behavior Model",
"Extended Flow View Embedding",
"State Glyphs",
"Real World Dataset",
"Mobile Handsets",
"Visual Analytics",
"Data Models",
"Uncertainty",
"Analytical Models",
"Electronic Mail",
"Visual Analytics",
"Collective Behavior",
"Telco Data",
"Movement",
"Spatio Temporal Analysis"
],
"authors": [
{
"affiliation": "Department of Computer Science and Engineering, Hong Kong University of Science and Technology",
"fullName": "Yixian Zheng",
"givenName": "Yixian",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Hong Kong University of Science and Technology",
"fullName": "Wenchao Wu",
"givenName": "Wenchao",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Hong Kong University of Science and Technology",
"fullName": "Haipeng Zeng",
"givenName": "Haipeng",
"surname": "Zeng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Design and Innovation, Tongji University",
"fullName": "Nan Cao",
"givenName": "Nan",
"surname": "Cao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Hong Kong University of Science and Technology",
"fullName": "Huamin Qu",
"givenName": "Huamin",
"surname": "Qu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Noah's Ark Lab, Huawei Technologies Investment Co. Ltd",
"fullName": "Mingxuan Yuan",
"givenName": "Mingxuan",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Noah's Ark Lab, Huawei Technologies Investment Co. Ltd",
"fullName": "Jia Zeng",
"givenName": "Jia",
"surname": "Zeng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer and Information Science, University of Macau",
"fullName": "Lionel M. Ni",
"givenName": "Lionel M.",
"surname": "Ni",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "big-data",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-12-01T00:00:00",
"pubType": "proceedings",
"pages": "843-852",
"year": "2016",
"issn": null,
"isbn": "978-1-4673-9005-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07840676",
"articleId": "12OmNxZ2Gje",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07840678",
"articleId": "12OmNxxNbRm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cso/2011/4335/0/4335a635",
"title": "Outlier Detection on Large-Scale Collective Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2011/4335a635/12OmNASraQP",
"parentPublication": {
"id": "proceedings/cso/2011/4335/0",
"title": "2011 Fourth International Joint Conference on Computational Sciences and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2017/3932/0/07962466",
"title": "Experimental Study of Telco Localization Methods",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2017/07962466/12OmNBsue1t",
"parentPublication": {
"id": "proceedings/mdm/2017/3932/0",
"title": "2017 18th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/5/3336j134",
"title": "Modelling Dynamic System for Collective Learning Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336j134/12OmNwI8cee",
"parentPublication": {
"id": "proceedings/csse/2008/3336/5",
"title": "2008 International Conference on Computer Science and Software Engineering (CSSE 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/skg/2007/3007/0/30070062",
"title": "A Process Algebraic Approach to Modeling Collective Behaviors in Social Networks",
"doi": null,
"abstractUrl": "/proceedings-article/skg/2007/30070062/12OmNzlD97v",
"parentPublication": {
"id": "proceedings/skg/2007/3007/0",
"title": "Semantics, Knowledge and Grid, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2018/4133/0/413301a280",
"title": "TBD-DP: Telco Big Data Visual Analytics with Data Postdiction",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2018/413301a280/12OmNznkK6o",
"parentPublication": {
"id": "proceedings/mdm/2018/4133/0",
"title": "2018 19th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192730",
"title": "TelCoVis: Visual Exploration of Co-occurrence in Urban Human Mobility Based on Telco Data",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192730/13rRUxbTMyU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/09/08663312",
"title": "LDA Ensembles for Interactive Exploration and Categorization of Behaviors",
"doi": null,
"abstractUrl": "/journal/tg/2020/09/08663312/18exteDwa5O",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vizsec/2018/8194/0/08709223",
"title": "User Behavior Map: Visual Exploration for Cyber Security Session Data",
"doi": null,
"abstractUrl": "/proceedings-article/vizsec/2018/08709223/19ZL2MVnbB6",
"parentPublication": {
"id": "proceedings/vizsec/2018/8194/0",
"title": "2018 IEEE Symposium on Visualization for Cyber Security (VizSec)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2019/3363/0/336300a395",
"title": "Outdoor Localization Framework with Telco Data",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2019/336300a395/1ckrO4DMtl6",
"parentPublication": {
"id": "proceedings/mdm/2019/3363/0",
"title": "2019 20th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2022/04/09200783",
"title": "Context-Aware Telco Outdoor Localization",
"doi": null,
"abstractUrl": "/journal/tm/2022/04/09200783/1ndVlIRyvx6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBNM93m",
"title": "European Dependable Computing Conference",
"acronym": "edcc",
"groupId": "1001308",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvSbBNH",
"doi": "10.1109/EDCC.2012.19",
"title": "Applying Data Mining for Detecting Anomalies in Satellites",
"normalizedTitle": "Applying Data Mining for Detecting Anomalies in Satellites",
"abstract": "Telemetry data is the only source for identifying/predicting anomalies in artificial satellites. Human specialists analyze these data in real time, but its large volume, makes this analysis extremely difficult. In this experience paper we study the hypothesis of using clustering algorithms to help operators and analysts to perform telemetry analysis. Two real cases of satellite anomalies in Brazilian space missions are considered, allowing assessing and comparing the effectiveness of two clustering algorithms (K-means and Expectation Maximization), which showed to be effective in the case study where several telemetry channels tended to deliver outlier values and, in these cases, could support the satellite operators by allowing the anticipation of anomalies. However for silent problems, where there was just a small variation in a single telemetry, the algorithms were not as efficient.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Telemetry data is the only source for identifying/predicting anomalies in artificial satellites. Human specialists analyze these data in real time, but its large volume, makes this analysis extremely difficult. In this experience paper we study the hypothesis of using clustering algorithms to help operators and analysts to perform telemetry analysis. Two real cases of satellite anomalies in Brazilian space missions are considered, allowing assessing and comparing the effectiveness of two clustering algorithms (K-means and Expectation Maximization), which showed to be effective in the case study where several telemetry channels tended to deliver outlier values and, in these cases, could support the satellite operators by allowing the anticipation of anomalies. However for silent problems, where there was just a small variation in a single telemetry, the algorithms were not as efficient.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Telemetry data is the only source for identifying/predicting anomalies in artificial satellites. Human specialists analyze these data in real time, but its large volume, makes this analysis extremely difficult. In this experience paper we study the hypothesis of using clustering algorithms to help operators and analysts to perform telemetry analysis. Two real cases of satellite anomalies in Brazilian space missions are considered, allowing assessing and comparing the effectiveness of two clustering algorithms (K-means and Expectation Maximization), which showed to be effective in the case study where several telemetry channels tended to deliver outlier values and, in these cases, could support the satellite operators by allowing the anticipation of anomalies. However for silent problems, where there was just a small variation in a single telemetry, the algorithms were not as efficient.",
"fno": "4671a212",
"keywords": [
"Space Systems",
"Anomaly Detection",
"Clustering"
],
"authors": [
{
"affiliation": null,
"fullName": "Denise Rotondi Azevedo",
"givenName": "Denise Rotondi",
"surname": "Azevedo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ana Maria Ambrósio",
"givenName": "Ana Maria",
"surname": "Ambrósio",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Marco Vieira",
"givenName": "Marco",
"surname": "Vieira",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "edcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-05-01T00:00:00",
"pubType": "proceedings",
"pages": "212-217",
"year": "2012",
"issn": null,
"isbn": "978-0-7695-4671-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4671a203",
"articleId": "12OmNyeWdP5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4671a218",
"articleId": "12OmNAle6rJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ares/2008/3102/0/3102a204",
"title": "A Framework for Detecting Anomalies in VoIP Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ares/2008/3102a204/12OmNApLGnx",
"parentPublication": {
"id": "proceedings/ares/2008/3102/0",
"title": "2008 Third International Conference on Availability, Reliability and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/npc/2008/3354/0/3354a198",
"title": "Detecting Network-Wide Traffic Anomalies Based on Spatial HMM",
"doi": null,
"abstractUrl": "/proceedings-article/npc/2008/3354a198/12OmNApu5w3",
"parentPublication": {
"id": "proceedings/npc/2008/3354/0",
"title": "Network and Parallel Computing Workshops, IFIP International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smc-it/2017/3462/0/3462a149",
"title": "Space Weather Conditions during the Galaxy 15 and Telstar 401 Satellites Anomalies",
"doi": null,
"abstractUrl": "/proceedings-article/smc-it/2017/3462a149/12OmNAtaS37",
"parentPublication": {
"id": "proceedings/smc-it/2017/3462/0",
"title": "2017 6th International Conference on Space Mission Challenges for Information Technology (SMC-IT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2012/4637/0/4637a478",
"title": "An Unsupervised Anomaly Detection Approach for Spacecraft Based on Normal Behavior Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2012/4637a478/12OmNwKGArO",
"parentPublication": {
"id": "proceedings/icicta/2012/4637/0",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aero/2011/7350/0/05747656",
"title": "Epidemiology of satellite anomalies and failures: A subsystem-centric approach",
"doi": null,
"abstractUrl": "/proceedings-article/aero/2011/05747656/12OmNzd7bE7",
"parentPublication": {
"id": "proceedings/aero/2011/7350/0",
"title": "IEEE Aerospace Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258192",
"title": "Online mining for association rules and collective anomalies in data streams",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258192/17D45W2WyyI",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/indis/2018/0194/0/019400a001",
"title": "Flowzilla: A Methodology for Detecting Data Transfer Anomalies in Research Networks",
"doi": null,
"abstractUrl": "/proceedings-article/indis/2018/019400a001/181W9oAYHYI",
"parentPublication": {
"id": "proceedings/indis/2018/0194/0",
"title": "2018 IEEE/ACM Innovating the Network for Data-Intensive Science (INDIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2023/02/10061588",
"title": "Detecting Anomalies in Small Unmanned Aerial Systems via Graphical Normalizing Flows",
"doi": null,
"abstractUrl": "/magazine/ex/2023/02/10061588/1Lk2Nd30JPy",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-smartcity-dss/2019/2058/0/205800c270",
"title": "Topology-Aware Event Sequence Mining for Understanding HPC System Behavior and Detecting Anomalies",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2019/205800c270/1dPoym8e7W8",
"parentPublication": {
"id": "proceedings/hpcc-smartcity-dss/2019/2058/0",
"title": "2019 IEEE 21st International Conference on High Performance Computing and Communications; IEEE 17th International Conference on Smart City; IEEE 5th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2020/9074/0/907400a081",
"title": "A Method to Detecting Artifact Anomalies in A Microservice Architecture",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2020/907400a081/1rvCrf2eiSk",
"parentPublication": {
"id": "proceedings/icpads/2020/9074/0",
"title": "2020 IEEE 26th International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNykCcdo",
"title": "2018 IEEE Pacific Visualization Symposium (PacificVis)",
"acronym": "pacificvis",
"groupId": "1001657",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNym2c5B",
"doi": "10.1109/PacificVis.2018.00027",
"title": "Visual Analysis of Collective Anomalies Through High-Order Correlation Graph",
"normalizedTitle": "Visual Analysis of Collective Anomalies Through High-Order Correlation Graph",
"abstract": "Detecting, analyzing and reasoning collective anomalies is important for many real-life application domains such as facility monitoring, software analysis and security. The main challenges include the overwhelming number of low-risk events and their multifaceted relationships which form the collective anomaly, the diversity in various data and anomaly types, and the difficulty to incorporate domain knowledge in the anomaly analysis process. In this paper, we propose a novel concept of high-order correlation graph (HOCG). Compared with the previous correlation graph definition, HOCG achieves better user interactivity, computational scalability, and domain generality through synthesizing heterogeneous types of nodes, attributes, and multifaceted relationships in a single graph. We design elaborate visual metaphors, interaction models, and the coordinated multiple view based interface to allow users to fully unleash the visual analytics power over HOCG. We conduct case studies in two real-life application domains, i.e., facility monitoring and software analysis. The results demonstrate the effectiveness of HOCG in the overview of point anomalies, detection of collective anomalies, and reasoning process of root cause analysis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Detecting, analyzing and reasoning collective anomalies is important for many real-life application domains such as facility monitoring, software analysis and security. The main challenges include the overwhelming number of low-risk events and their multifaceted relationships which form the collective anomaly, the diversity in various data and anomaly types, and the difficulty to incorporate domain knowledge in the anomaly analysis process. In this paper, we propose a novel concept of high-order correlation graph (HOCG). Compared with the previous correlation graph definition, HOCG achieves better user interactivity, computational scalability, and domain generality through synthesizing heterogeneous types of nodes, attributes, and multifaceted relationships in a single graph. We design elaborate visual metaphors, interaction models, and the coordinated multiple view based interface to allow users to fully unleash the visual analytics power over HOCG. We conduct case studies in two real-life application domains, i.e., facility monitoring and software analysis. The results demonstrate the effectiveness of HOCG in the overview of point anomalies, detection of collective anomalies, and reasoning process of root cause analysis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Detecting, analyzing and reasoning collective anomalies is important for many real-life application domains such as facility monitoring, software analysis and security. The main challenges include the overwhelming number of low-risk events and their multifaceted relationships which form the collective anomaly, the diversity in various data and anomaly types, and the difficulty to incorporate domain knowledge in the anomaly analysis process. In this paper, we propose a novel concept of high-order correlation graph (HOCG). Compared with the previous correlation graph definition, HOCG achieves better user interactivity, computational scalability, and domain generality through synthesizing heterogeneous types of nodes, attributes, and multifaceted relationships in a single graph. We design elaborate visual metaphors, interaction models, and the coordinated multiple view based interface to allow users to fully unleash the visual analytics power over HOCG. We conduct case studies in two real-life application domains, i.e., facility monitoring and software analysis. The results demonstrate the effectiveness of HOCG in the overview of point anomalies, detection of collective anomalies, and reasoning process of root cause analysis.",
"fno": "142401a150",
"keywords": [
"Data Analysis",
"Data Visualisation",
"Graph Theory",
"Security Of Data",
"Collective Anomaly Detection",
"Collective Anomaly Reasoning",
"Collective Anomaly Analysis",
"Correlation Graph Definition",
"Visual Analytics Power",
"Anomaly Analysis Process",
"Anomaly Types",
"Multifaceted Relationships",
"High Order Correlation Graph",
"Visual Analysis",
"Root Cause Analysis",
"Point Anomalies",
"HOCG",
"Anomaly Detection",
"Correlation",
"Data Visualization",
"Monitoring",
"Visual Analytics",
"Software",
"Correlation Graph Visualization",
"Collective Anomaly"
],
"authors": [
{
"affiliation": null,
"fullName": "Jun Tao",
"givenName": "Jun",
"surname": "Tao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lei Shi",
"givenName": "Lei",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhou Zhuang",
"givenName": "Zhou",
"surname": "Zhuang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Congcong Huang",
"givenName": "Congcong",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rulei Yu",
"givenName": "Rulei",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Purui Su",
"givenName": "Purui",
"surname": "Su",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chaoli Wang",
"givenName": "Chaoli",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yang Chen",
"givenName": "Yang",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pacificvis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-04-01T00:00:00",
"pubType": "proceedings",
"pages": "150-159",
"year": "2018",
"issn": "2165-8773",
"isbn": "978-1-5386-1424-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "142401a140",
"articleId": "12OmNy2rS1M",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "142401a160",
"articleId": "12OmNBpVPS1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cis/2011/4584/0/4584a993",
"title": "A Wavelet-Based Detection Approach to Traffic Anomalies",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2011/4584a993/12OmNwHQB5r",
"parentPublication": {
"id": "proceedings/cis/2011/4584/0",
"title": "2011 Seventh International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcse/2009/3570/2/3570b323",
"title": "Extraction of Magnetic Local Anomalies Based on GIS",
"doi": null,
"abstractUrl": "/proceedings-article/wcse/2009/3570b323/12OmNxHJ9vS",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issre/2017/0941/0/0941a090",
"title": "A Fault Correlation Approach to Detect Performance Anomalies in Virtual Network Function Chains",
"doi": null,
"abstractUrl": "/proceedings-article/issre/2017/0941a090/12OmNyYm2C1",
"parentPublication": {
"id": "proceedings/issre/2017/0941/0",
"title": "2017 IEEE 28th International Symposium on Software Reliability Engineering (ISSRE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2011/4408/0/4408a527",
"title": "Detection of Cross-Channel Anomalies from Multiple Data Channels",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2011/4408a527/12OmNyoAA7d",
"parentPublication": {
"id": "proceedings/icdm/2011/4408/0",
"title": "2011 IEEE 11th International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2017/09/mco2017090076",
"title": "Thwarting DoS Attacks: A Framework for Detection based on Collective Anomalies and Clustering",
"doi": null,
"abstractUrl": "/magazine/co/2017/09/mco2017090076/13rRUB7a0Wt",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08587186",
"title": "Visual Analysis of Collective Anomalies Using Faceted High-Order Correlation Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08587186/17D45Vw15wQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258192",
"title": "Online mining for association rules and collective anomalies in data streams",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258192/17D45W2WyyI",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifip-networking/2018/08/0/08697027",
"title": "CellPAD: Detecting Performance Anomalies in Cellular Networks via Regression Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/ifip-networking/2018/08697027/19wB81mgdr2",
"parentPublication": {
"id": "proceedings/ifip-networking/2018/08/0",
"title": "2018 IFIP Networking Conference (IFIP Networking) and Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/05/09740038",
"title": "CSCAD: Correlation Structure-Based Collective Anomaly Detection in Complex System",
"doi": null,
"abstractUrl": "/journal/tk/2023/05/09740038/1BWZdToFUVW",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2020/7624/0/762400a598",
"title": "Collective Anomaly Detection for Multivariate Data using Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2020/762400a598/1uGYtR0xPvq",
"parentPublication": {
"id": "proceedings/csci/2020/7624/0",
"title": "2020 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzTH0FY",
"title": "2011 IEEE 11th International Conference on Data Mining",
"acronym": "icdm",
"groupId": "1000179",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyoAA7d",
"doi": "10.1109/ICDM.2011.51",
"title": "Detection of Cross-Channel Anomalies from Multiple Data Channels",
"normalizedTitle": "Detection of Cross-Channel Anomalies from Multiple Data Channels",
"abstract": "We identify and formulate a novel problem: cross channel anomaly detection from multiple data channels. Cross channel anomalies are common amongst the individual channel anomalies, and are often portent of significant events. Using spectral approaches, we propose a two-stage detection method: anomaly detection at a single-channel level, followed by the detection of cross-channel anomalies from the amalgamation of single channel anomalies. Our mathematical analysis shows that our method is likely to reduce the false alarm rate. We demonstrate our method in two applications: document understanding with multiple text corpora, and detection of repeated anomalies in video surveillance. The experimental results consistently demonstrate the superior performance of our method compared with related state-of-art methods, including the one-class SVM and principal component pursuit. In addition, our framework can be deployed in a decentralized manner, lending itself for large scale data stream analysis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We identify and formulate a novel problem: cross channel anomaly detection from multiple data channels. Cross channel anomalies are common amongst the individual channel anomalies, and are often portent of significant events. Using spectral approaches, we propose a two-stage detection method: anomaly detection at a single-channel level, followed by the detection of cross-channel anomalies from the amalgamation of single channel anomalies. Our mathematical analysis shows that our method is likely to reduce the false alarm rate. We demonstrate our method in two applications: document understanding with multiple text corpora, and detection of repeated anomalies in video surveillance. The experimental results consistently demonstrate the superior performance of our method compared with related state-of-art methods, including the one-class SVM and principal component pursuit. In addition, our framework can be deployed in a decentralized manner, lending itself for large scale data stream analysis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We identify and formulate a novel problem: cross channel anomaly detection from multiple data channels. Cross channel anomalies are common amongst the individual channel anomalies, and are often portent of significant events. Using spectral approaches, we propose a two-stage detection method: anomaly detection at a single-channel level, followed by the detection of cross-channel anomalies from the amalgamation of single channel anomalies. Our mathematical analysis shows that our method is likely to reduce the false alarm rate. We demonstrate our method in two applications: document understanding with multiple text corpora, and detection of repeated anomalies in video surveillance. The experimental results consistently demonstrate the superior performance of our method compared with related state-of-art methods, including the one-class SVM and principal component pursuit. In addition, our framework can be deployed in a decentralized manner, lending itself for large scale data stream analysis.",
"fno": "4408a527",
"keywords": [
"Spectral Methods",
"Anomaly Detection",
"Topic Detection"
],
"authors": [
{
"affiliation": null,
"fullName": "Duc Son Pham",
"givenName": "Duc Son",
"surname": "Pham",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Budhaditya Saha",
"givenName": "Budhaditya",
"surname": "Saha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dinh Q. Phung",
"givenName": "Dinh Q.",
"surname": "Phung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Svetha Venkatesh",
"givenName": "Svetha",
"surname": "Venkatesh",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-12-01T00:00:00",
"pubType": "proceedings",
"pages": "527-536",
"year": "2011",
"issn": "1550-4786",
"isbn": "978-0-7695-4408-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4408a517",
"articleId": "12OmNzZ5o9N",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4408a537",
"articleId": "12OmNxRF78A",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/npc/2008/3354/0/3354a198",
"title": "Detecting Network-Wide Traffic Anomalies Based on Spatial HMM",
"doi": null,
"abstractUrl": "/proceedings-article/npc/2008/3354a198/12OmNApu5w3",
"parentPublication": {
"id": "proceedings/npc/2008/3354/0",
"title": "Network and Parallel Computing Workshops, IFIP International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2011/4584/0/4584a993",
"title": "A Wavelet-Based Detection Approach to Traffic Anomalies",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2011/4584a993/12OmNwHQB5r",
"parentPublication": {
"id": "proceedings/cis/2011/4584/0",
"title": "2011 Seventh International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mines/2009/3843/2/3843b358",
"title": "Discovering Host Anomalies in Multi-source Information",
"doi": null,
"abstractUrl": "/proceedings-article/mines/2009/3843b358/12OmNx8Ouqr",
"parentPublication": {
"id": "proceedings/mines/2009/3843/2",
"title": "Multimedia Information Networking and Security, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispan/2008/3125/0/3125a266",
"title": "Detection and Identification of Anomalies in Wireless Mesh Networks Using Principal Component Analysis (PCA)",
"doi": null,
"abstractUrl": "/proceedings-article/ispan/2008/3125a266/12OmNzZWbKQ",
"parentPublication": {
"id": "proceedings/ispan/2008/3125/0",
"title": "Parallel Architectures, Algorithms, and Networks, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258192",
"title": "Online mining for association rules and collective anomalies in data streams",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258192/17D45W2WyyI",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifip-networking/2018/08/0/08697027",
"title": "CellPAD: Detecting Performance Anomalies in Cellular Networks via Regression Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/ifip-networking/2018/08697027/19wB81mgdr2",
"parentPublication": {
"id": "proceedings/ifip-networking/2018/08/0",
"title": "2018 IFIP Networking Conference (IFIP Networking) and Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/2/260702a127",
"title": "Improving the Detection of Sequential Anomalies Associated with a Loop",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260702a127/1cYitK9EG6k",
"parentPublication": {
"id": "compsac/2019/2607/2",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issre/2020/9870/0/987000a048",
"title": "Unsupervised Detection of Microservice Trace Anomalies through Service-Level Deep Bayesian Networks",
"doi": null,
"abstractUrl": "/proceedings-article/issre/2020/987000a048/1oFGK31Ozsc",
"parentPublication": {
"id": "proceedings/issre/2020/9870/0",
"title": "2020 IEEE 31st International Symposium on Software Reliability Engineering (ISSRE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issre/2020/9870/0/987000a059",
"title": "How Far Have We Come in Detecting Anomalies in Distributed Systems? An Empirical Study with a Statement-level Fault Injection Method",
"doi": null,
"abstractUrl": "/proceedings-article/issre/2020/987000a059/1oFGKIdVdQI",
"parentPublication": {
"id": "proceedings/issre/2020/9870/0",
"title": "2020 IEEE 31st International Symposium on Software Reliability Engineering (ISSRE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428386",
"title": "Cross-Scene Person Trajectory Anomaly Detection Based on Re-Identification",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428386/1uim16kMRVu",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKipN",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"acronym": "big-data",
"groupId": "1802964",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45W2WyyI",
"doi": "10.1109/BigData.2017.8258192",
"title": "Online mining for association rules and collective anomalies in data streams",
"normalizedTitle": "Online mining for association rules and collective anomalies in data streams",
"abstract": "When analyzing streaming data, the results can depreciate in value faster than the analysis can be completed and results deployed. This is certainly the case in the area of anomaly detection, where detecting a potential problem as it is occurring (or in the early stages) can permit corrective behavior. However, most anomaly detection methods focus on point anomalies, whilst many fraudulent behaviors could be detected only through collective analysis of sequences of data in practice. Moreover, anomaly detection systems often stop at detecting anomalies; they typically do not provide information about how the features (attributes) of anomalies relate to each other or to those in normal states. The goal of this research is to create a distributed system that allows for the detection of collective anomalies from streaming data, and to provide a richer context of information about the anomalies besides their presence. To accomplish this, we (a) re-engineered an online sequence anomaly detection algorithm and (b) designed new algorithms for targeted association mining to run on a streaming, distributed environment. Our experiments, conducted on both synthetic and real-world data sets, demonstrated that the proposed framework is able to achieve near real-time response in detecting anomalies and extracting information pertaining to the anomalies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When analyzing streaming data, the results can depreciate in value faster than the analysis can be completed and results deployed. This is certainly the case in the area of anomaly detection, where detecting a potential problem as it is occurring (or in the early stages) can permit corrective behavior. However, most anomaly detection methods focus on point anomalies, whilst many fraudulent behaviors could be detected only through collective analysis of sequences of data in practice. Moreover, anomaly detection systems often stop at detecting anomalies; they typically do not provide information about how the features (attributes) of anomalies relate to each other or to those in normal states. The goal of this research is to create a distributed system that allows for the detection of collective anomalies from streaming data, and to provide a richer context of information about the anomalies besides their presence. To accomplish this, we (a) re-engineered an online sequence anomaly detection algorithm and (b) designed new algorithms for targeted association mining to run on a streaming, distributed environment. Our experiments, conducted on both synthetic and real-world data sets, demonstrated that the proposed framework is able to achieve near real-time response in detecting anomalies and extracting information pertaining to the anomalies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When analyzing streaming data, the results can depreciate in value faster than the analysis can be completed and results deployed. This is certainly the case in the area of anomaly detection, where detecting a potential problem as it is occurring (or in the early stages) can permit corrective behavior. However, most anomaly detection methods focus on point anomalies, whilst many fraudulent behaviors could be detected only through collective analysis of sequences of data in practice. Moreover, anomaly detection systems often stop at detecting anomalies; they typically do not provide information about how the features (attributes) of anomalies relate to each other or to those in normal states. The goal of this research is to create a distributed system that allows for the detection of collective anomalies from streaming data, and to provide a richer context of information about the anomalies besides their presence. To accomplish this, we (a) re-engineered an online sequence anomaly detection algorithm and (b) designed new algorithms for targeted association mining to run on a streaming, distributed environment. Our experiments, conducted on both synthetic and real-world data sets, demonstrated that the proposed framework is able to achieve near real-time response in detecting anomalies and extracting information pertaining to the anomalies.",
"fno": "08258192",
"keywords": [
"Itemsets",
"Anomaly Detection",
"Data Models",
"Computational Modeling",
"Sparks",
"Association Mining",
"Anomaly Detection",
"Streaming Data",
"Online Processing",
"Collective Anomalies"
],
"authors": [
{
"affiliation": "Center for Visual and Decision Informatics, University of Louisiana at Lafayette, Lafayette, USA",
"fullName": "Shaaban Abbady",
"givenName": "Shaaban",
"surname": "Abbady",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Visual and Decision Informatics, University of Louisiana at Lafayette, Lafayette, USA",
"fullName": "Cheng-Yuan Ke",
"givenName": "Cheng-Yuan",
"surname": "Ke",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Visual and Decision Informatics, University of Louisiana at Lafayette, Lafayette, USA",
"fullName": "Jennifer Lavergne",
"givenName": "Jennifer",
"surname": "Lavergne",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Visual and Decision Informatics, University of Louisiana at Lafayette, Lafayette, USA",
"fullName": "Jian Chen",
"givenName": "Jian",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Visual and Decision Informatics, University of Louisiana at Lafayette, Lafayette, USA",
"fullName": "Vijay Raghavan",
"givenName": "Vijay",
"surname": "Raghavan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computing, University of South Alabama, Mobile, USA",
"fullName": "Ryan Benton",
"givenName": "Ryan",
"surname": "Benton",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "big-data",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-12-01T00:00:00",
"pubType": "proceedings",
"pages": "2370-2379",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-2715-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08258191",
"articleId": "17D45WrVgeU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08258193",
"articleId": "17D45VsBTUj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/big-data/2015/9926/0/07363865",
"title": "Online anomaly detection over Big Data streams",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07363865/12OmNBNM8Mw",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mines/2009/3843/2/3843b358",
"title": "Discovering Host Anomalies in Multi-source Information",
"doi": null,
"abstractUrl": "/proceedings-article/mines/2009/3843b358/12OmNx8Ouqr",
"parentPublication": {
"id": "proceedings/mines/2009/3843/2",
"title": "Multimedia Information Networking and Security, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2018/1424/0/142401a150",
"title": "Visual Analysis of Collective Anomalies Through High-Order Correlation Graph",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2018/142401a150/12OmNym2c5B",
"parentPublication": {
"id": "proceedings/pacificvis/2018/1424/0",
"title": "2018 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2011/4408/0/4408a527",
"title": "Detection of Cross-Channel Anomalies from Multiple Data Channels",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2011/4408a527/12OmNyoAA7d",
"parentPublication": {
"id": "proceedings/icdm/2011/4408/0",
"title": "2011 IEEE 11th International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08587186",
"title": "Visual Analysis of Collective Anomalies Using Faceted High-Order Correlation Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08587186/17D45Vw15wQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2018/9288/0/928800b435",
"title": "EXAD: A System for Explainable Anomaly Detection on Big Data Traces",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2018/928800b435/18jXEvhecU0",
"parentPublication": {
"id": "proceedings/icdmw/2018/9288/0",
"title": "2018 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/2/260702a127",
"title": "Improving the Detection of Sequential Anomalies Associated with a Loop",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260702a127/1cYitK9EG6k",
"parentPublication": {
"id": "compsac/2019/2607/2",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2019/0858/0/09006354",
"title": "Fast Anomaly Detection in Multiple Multi-Dimensional Data Streams",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2019/09006354/1hJrRPCKQg0",
"parentPublication": {
"id": "proceedings/big-data/2019/0858/0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2020/7624/0/762400a598",
"title": "Collective Anomaly Detection for Multivariate Data using Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2020/762400a598/1uGYtR0xPvq",
"parentPublication": {
"id": "proceedings/csci/2020/7624/0",
"title": "2020 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smds/2021/0058/0/005800a142",
"title": "Track Before Detect: A Novel Approach For Unsupervised Anomaly Detection In Time Series",
"doi": null,
"abstractUrl": "/proceedings-article/smds/2021/005800a142/1yeQvCFJDtS",
"parentPublication": {
"id": "proceedings/smds/2021/0058/0",
"title": "2021 IEEE International Conference on Smart Data Services (SMDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1kwqNHC4Fy0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1kwqPpJuVvq",
"doi": "10.1109/ICME46284.2020.9102889",
"title": "Statistical Detection Of Collective Data Fraud",
"normalizedTitle": "Statistical Detection Of Collective Data Fraud",
"abstract": "Statistical divergence is widely applied in multimedia processing, basically due to regularity and interpretable features displayed in data. However, in a broader range of data realm, these advantages may no longer be feasible, and therefore a more general approach is required. In data detection, statistical divergence can be used as a similarity measurement based on collective features. In this paper, we present a collective detection technique based on statistical divergence. The technique extracts distribution similarities among data collections, and then uses the statistical divergence to detect collective anomalies. Evaluation shows that it is applicable in the real world.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Statistical divergence is widely applied in multimedia processing, basically due to regularity and interpretable features displayed in data. However, in a broader range of data realm, these advantages may no longer be feasible, and therefore a more general approach is required. In data detection, statistical divergence can be used as a similarity measurement based on collective features. In this paper, we present a collective detection technique based on statistical divergence. The technique extracts distribution similarities among data collections, and then uses the statistical divergence to detect collective anomalies. Evaluation shows that it is applicable in the real world.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Statistical divergence is widely applied in multimedia processing, basically due to regularity and interpretable features displayed in data. However, in a broader range of data realm, these advantages may no longer be feasible, and therefore a more general approach is required. In data detection, statistical divergence can be used as a similarity measurement based on collective features. In this paper, we present a collective detection technique based on statistical divergence. The technique extracts distribution similarities among data collections, and then uses the statistical divergence to detect collective anomalies. Evaluation shows that it is applicable in the real world.",
"fno": "09102889",
"keywords": [
"Fraud",
"Multimedia Computing",
"Statistical Distributions",
"Statistical Detection",
"Collective Data Fraud",
"Statistical Divergence",
"Interpretable Features",
"Data Realm",
"Data Detection",
"Collective Features",
"Collective Detection Technique",
"Data Collections",
"Multimedia Processing",
"Similarity Measurement",
"Distribution Similarities",
"Data Collection",
"Optimization",
"Gaussian Distribution",
"Nickel",
"Distortion",
"Anomaly Detection",
"Streaming Media",
"Statistical Divergence",
"Collective",
"Fraud",
"Detection"
],
"authors": [
{
"affiliation": "Shanghai Jiao Tong University,Shanghai,China,200240",
"fullName": "Ruoyu Wang",
"givenName": "Ruoyu",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Peking University,Beijing,China",
"fullName": "Xiaobo Hu",
"givenName": "Xiaobo",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of New South Wales,Sydney,Australia",
"fullName": "Daniel Sun",
"givenName": "Daniel",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,Shanghai,China,200240",
"fullName": "Guoqiang Li",
"givenName": "Guoqiang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of New South Wales,Sydney,Australia",
"fullName": "Raymond Wong",
"givenName": "Raymond",
"surname": "Wong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Data61, CSIRO,Australia",
"fullName": "Shiping Chen",
"givenName": "Shiping",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NEC Corporation,Biometrics Research Laboratories,Japan",
"fullName": "Jianquan Liu",
"givenName": "Jianquan",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-1331-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09102912",
"articleId": "1kwrfVs1B2o",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09102885",
"articleId": "1kwr8VaBnIA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ic3/2017/3077/0/08284299",
"title": "Fraud detection and frequent pattern matching in insurance claims using data mining techniques",
"doi": null,
"abstractUrl": "/proceedings-article/ic3/2017/08284299/12OmNANBZrf",
"parentPublication": {
"id": "proceedings/ic3/2017/3077/0",
"title": "2017 Tenth International Conference on Contemporary Computing (IC3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460482",
"title": "Closed-form information-theoretic divergences for statistical mixtures",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460482/12OmNBiygxG",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccima/2001/1312/0/13120044",
"title": "Statistical Physics Model for the Collective Price Fluctuations of Portfolios",
"doi": null,
"abstractUrl": "/proceedings-article/iccima/2001/13120044/12OmNzWfp8U",
"parentPublication": {
"id": "proceedings/iccima/2001/1312/0",
"title": "Computational Intelligence and Multimedia Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2017/09/mco2017090076",
"title": "Thwarting DoS Attacks: A Framework for Detection based on Collective Anomalies and Clustering",
"doi": null,
"abstractUrl": "/magazine/co/2017/09/mco2017090076/13rRUB7a0Wt",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875953",
"title": "Volume-Preserving Mapping and Registration for Collective Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875953/13rRUwdIOUO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258192",
"title": "Online mining for association rules and collective anomalies in data streams",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258192/17D45W2WyyI",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/skg/2018/0441/0/08703943",
"title": "Collective Anomaly Detection Using Big Data Distributed Stream Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/skg/2018/08703943/19JEb9MsYhO",
"parentPublication": {
"id": "proceedings/skg/2018/0441/0",
"title": "2018 14th International Conference on Semantics, Knowledge and Grids (SKG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2020/10/08695738",
"title": "Fraud Detection in Dynamic Interaction Network",
"doi": null,
"abstractUrl": "/journal/tk/2020/10/08695738/19sOFVxD2x2",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/05/09740038",
"title": "CSCAD: Correlation Structure-Based Collective Anomaly Detection in Complex System",
"doi": null,
"abstractUrl": "/journal/tk/2023/05/09740038/1BWZdToFUVW",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2020/7624/0/762400a598",
"title": "Collective Anomaly Detection for Multivariate Data using Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2020/762400a598/1uGYtR0xPvq",
"parentPublication": {
"id": "proceedings/csci/2020/7624/0",
"title": "2020 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1s645BaTzVu",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"acronym": "big-data",
"groupId": "1802964",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1s645Gy8EDe",
"doi": "10.1109/BigData50022.2020.9378419",
"title": "Failure Prediction in Datacenters Using Unsupervised Multimodal Anomaly Detection",
"normalizedTitle": "Failure Prediction in Datacenters Using Unsupervised Multimodal Anomaly Detection",
"abstract": "Predicting hard drive failures in datacenters can help avoid wasting resources and waiting time for recovery. Anomaly detection from sensing data is commonly used for predicting failures. Usually, conventional threshold-based anomaly detection methods consider each sensor independently. However, deciding an optimal threshold for each type of sensors is not trivial, especially for large-scale systems in datacenters. To detect failures that cannot conventionally be detected, multimodal anomaly detection becomes crucial integrating sensing data from different types of sensors. This work proposes a correlation-based multimodal anomaly detection approach. This approach is applied to a Network-Attached Storage (NAS) system with multiple hard disk drives (HDDs) and three sensors, which are a thermal camera, a microphone, and system performance logs. The unimodal results show that the auditory and system performance model can detect temporal anomalies, and the thermal model can detect spatial anomalies. The multimodal results show that even with a simple filter and detection algorithms, the multimodal approach was able to detect failure signs before the real failure and also earlier than the auditory unimodal approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Predicting hard drive failures in datacenters can help avoid wasting resources and waiting time for recovery. Anomaly detection from sensing data is commonly used for predicting failures. Usually, conventional threshold-based anomaly detection methods consider each sensor independently. However, deciding an optimal threshold for each type of sensors is not trivial, especially for large-scale systems in datacenters. To detect failures that cannot conventionally be detected, multimodal anomaly detection becomes crucial integrating sensing data from different types of sensors. This work proposes a correlation-based multimodal anomaly detection approach. This approach is applied to a Network-Attached Storage (NAS) system with multiple hard disk drives (HDDs) and three sensors, which are a thermal camera, a microphone, and system performance logs. The unimodal results show that the auditory and system performance model can detect temporal anomalies, and the thermal model can detect spatial anomalies. The multimodal results show that even with a simple filter and detection algorithms, the multimodal approach was able to detect failure signs before the real failure and also earlier than the auditory unimodal approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Predicting hard drive failures in datacenters can help avoid wasting resources and waiting time for recovery. Anomaly detection from sensing data is commonly used for predicting failures. Usually, conventional threshold-based anomaly detection methods consider each sensor independently. However, deciding an optimal threshold for each type of sensors is not trivial, especially for large-scale systems in datacenters. To detect failures that cannot conventionally be detected, multimodal anomaly detection becomes crucial integrating sensing data from different types of sensors. This work proposes a correlation-based multimodal anomaly detection approach. This approach is applied to a Network-Attached Storage (NAS) system with multiple hard disk drives (HDDs) and three sensors, which are a thermal camera, a microphone, and system performance logs. The unimodal results show that the auditory and system performance model can detect temporal anomalies, and the thermal model can detect spatial anomalies. The multimodal results show that even with a simple filter and detection algorithms, the multimodal approach was able to detect failure signs before the real failure and also earlier than the auditory unimodal approach.",
"fno": "09378419",
"keywords": [
"Computer Centres",
"Disc Drives",
"Hard Discs",
"Large Scale Systems",
"Sensors",
"Unsupervised Learning",
"Failure Prediction",
"Datacenters",
"Unsupervised Multimodal Anomaly Detection",
"Hard Drive Failures",
"Optimal Threshold",
"Large Scale Systems",
"Correlation Based Multimodal Anomaly Detection",
"Multiple Hard Disk Drives",
"Temporal Anomalies",
"Spatial Anomalies",
"Simple Filter",
"Failure Signs",
"Auditory Unimodal Approach",
"Threshold Based Anomaly Detection Methods",
"Network Attached Storage System",
"Temperature Sensors",
"Correlation",
"System Performance",
"Filtering Algorithms",
"Big Data",
"Detection Algorithms",
"Anomaly Detection",
"Unsupervised Anomaly Detection",
"Multimodal Approach",
"HDD Failure"
],
"authors": [
{
"affiliation": "Tohoku University,Graduate School of Information Sciences",
"fullName": "Minglu Zhao",
"givenName": "Minglu",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tohoku University,Graduate School of Information Sciences",
"fullName": "Reo Furuhata",
"givenName": "Reo",
"surname": "Furuhata",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tohoku University,Cyberscience Center",
"fullName": "Mulya Agung",
"givenName": "Mulya",
"surname": "Agung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tohoku University,Cyberscience Center",
"fullName": "Hiroyuki Takizawa",
"givenName": "Hiroyuki",
"surname": "Takizawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NEC Corporation,Corporate Business Development Division",
"fullName": "Tomoya Soma",
"givenName": "Tomoya",
"surname": "Soma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "big-data",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "3545-3549",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6251-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09378168",
"articleId": "1s64CZrZalq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09378151",
"articleId": "1s64Y7qMs5q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/issrew/2017/2387/0/2387a048",
"title": "Rich Network Anomaly Detection Using Multivariate Data",
"doi": null,
"abstractUrl": "/proceedings-article/issrew/2017/2387a048/12OmNAle6Fb",
"parentPublication": {
"id": "proceedings/issrew/2017/2387/0",
"title": "2017 IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcoss/2013/5041/0/5041a191",
"title": "Model-Based Thermal Anomaly Detection in Cloud Datacenters",
"doi": null,
"abstractUrl": "/proceedings-article/dcoss/2013/5041a191/12OmNwI8cew",
"parentPublication": {
"id": "proceedings/dcoss/2013/5041/0",
"title": "2013 IEEE International Conference on Distributed Computing in Sensor Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2002/02/t0108",
"title": "Anomaly Detection in Embedded Systems",
"doi": null,
"abstractUrl": "/journal/tc/2002/02/t0108/13rRUwI5TQ9",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000a129",
"title": "Unsupervised Anomaly Detection for Traffic Surveillance Based on Background Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000a129/17D45Wc1IKd",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2018/9159/0/08594955",
"title": "DOPING: Generative Data Augmentation for Unsupervised Anomaly Detection with GAN",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2018/08594955/17D45Xh13p3",
"parentPublication": {
"id": "proceedings/icdm/2018/9159/0",
"title": "2018 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cloud/2019/2705/0/270500a179",
"title": "Anomaly Detection from System Tracing Data Using Multimodal Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cloud/2019/270500a179/1cTJ4JOmEta",
"parentPublication": {
"id": "proceedings/cloud/2019/2705/0",
"title": "2019 IEEE 12th International Conference on Cloud Computing (CLOUD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102818",
"title": "Glad: Global And Local Anomaly Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102818/1kwrlp2eJVK",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2020/4380/0/438000b215",
"title": "RPAD: An Unsupervised HTTP Request Parameter Anomaly Detection Method",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2020/438000b215/1r548FG5ohG",
"parentPublication": {
"id": "proceedings/trustcom/2020/4380/0",
"title": "2020 IEEE 19th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2020/9916/0/991600a735",
"title": "WSAD: An Unsupervised Web Session Anomaly Detection Method",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2020/991600a735/1sBO2Q1WqdO",
"parentPublication": {
"id": "proceedings/msn/2020/9916/0",
"title": "2020 16th International Conference on Mobility, Sensing and Networking (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smds/2021/0058/0/005800a142",
"title": "Track Before Detect: A Novel Approach For Unsupervised Anomaly Detection In Time Series",
"doi": null,
"abstractUrl": "/proceedings-article/smds/2021/005800a142/1yeQvCFJDtS",
"parentPublication": {
"id": "proceedings/smds/2021/0058/0",
"title": "2021 IEEE International Conference on Smart Data Services (SMDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1uGYtvXFOyQ",
"title": "2020 International Conference on Computational Science and Computational Intelligence (CSCI)",
"acronym": "csci",
"groupId": "1803739",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1uGYtR0xPvq",
"doi": "10.1109/CSCI51800.2020.00106",
"title": "Collective Anomaly Detection for Multivariate Data using Generative Adversarial Networks",
"normalizedTitle": "Collective Anomaly Detection for Multivariate Data using Generative Adversarial Networks",
"abstract": "Generative adversarial network (GAN) is used to model complex high-dimensional distributions of real-world scenarios. It has been applied to anomaly detection and making great achievements. However, most of the existing GAN-based anomaly detection methods cannot detect collective anomalies that change the behavior of multipoint data instances. Moreover, although many GAN-based methods for time-series anomaly detection have been proposed, there are few studies to handle collective anomalies in time-series data. Besides, there is still much room to improve the methods in terms of computational cost and the accuracy for detecting anomaly. We thus aim to propose a GAN-based method to detect multi-dimensional collective anomalies with high accuracy. To correctly detect collective anomalies, we especially introduce an encoder into a GAN-based anomaly detection method to obtain the latent states of the real data. We furthermore adopt a sequence to sequence technique to both encoder and generator, recurrent neural network, and fully connected neural network for the discriminator. We conducted experiments using two types of datasets: artificial and natural, and verified the effectiveness of our GAN model.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Generative adversarial network (GAN) is used to model complex high-dimensional distributions of real-world scenarios. It has been applied to anomaly detection and making great achievements. However, most of the existing GAN-based anomaly detection methods cannot detect collective anomalies that change the behavior of multipoint data instances. Moreover, although many GAN-based methods for time-series anomaly detection have been proposed, there are few studies to handle collective anomalies in time-series data. Besides, there is still much room to improve the methods in terms of computational cost and the accuracy for detecting anomaly. We thus aim to propose a GAN-based method to detect multi-dimensional collective anomalies with high accuracy. To correctly detect collective anomalies, we especially introduce an encoder into a GAN-based anomaly detection method to obtain the latent states of the real data. We furthermore adopt a sequence to sequence technique to both encoder and generator, recurrent neural network, and fully connected neural network for the discriminator. We conducted experiments using two types of datasets: artificial and natural, and verified the effectiveness of our GAN model.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Generative adversarial network (GAN) is used to model complex high-dimensional distributions of real-world scenarios. It has been applied to anomaly detection and making great achievements. However, most of the existing GAN-based anomaly detection methods cannot detect collective anomalies that change the behavior of multipoint data instances. Moreover, although many GAN-based methods for time-series anomaly detection have been proposed, there are few studies to handle collective anomalies in time-series data. Besides, there is still much room to improve the methods in terms of computational cost and the accuracy for detecting anomaly. We thus aim to propose a GAN-based method to detect multi-dimensional collective anomalies with high accuracy. To correctly detect collective anomalies, we especially introduce an encoder into a GAN-based anomaly detection method to obtain the latent states of the real data. We furthermore adopt a sequence to sequence technique to both encoder and generator, recurrent neural network, and fully connected neural network for the discriminator. We conducted experiments using two types of datasets: artificial and natural, and verified the effectiveness of our GAN model.",
"fno": "762400a598",
"keywords": [
"Data Mining",
"Recurrent Neural Nets",
"Security Of Data",
"Time Series",
"Existing GAN Based Anomaly Detection Methods",
"Multipoint Data Instances",
"GAN Based Method",
"Time Series Anomaly Detection",
"Time Series Data",
"Multidimensional Collective Anomalies",
"GAN Based Anomaly Detection Method",
"GAN Model",
"Collective Anomaly Detection",
"Multivariate Data",
"Generative Adversarial Network",
"High Dimensional Distributions",
"Recurrent Neural Networks",
"Scientific Computing",
"Generative Adversarial Networks",
"Generators",
"Data Models",
"Computational Efficiency",
"Anomaly Detection",
"GAN",
"Collective Anomalies",
"Time Series"
],
"authors": [
{
"affiliation": "Ochanomizu University,Tokyo,Japan",
"fullName": "Chihiro Maru",
"givenName": "Chihiro",
"surname": "Maru",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ochanomizu University,Tokyo,Japan",
"fullName": "Ichiro Kobayashi",
"givenName": "Ichiro",
"surname": "Kobayashi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "csci",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "598-604",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7624-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "762400a591",
"articleId": "1uGYRsZinvy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "762400a605",
"articleId": "1uGYGCYfwTC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/big-data/2018/5035/0/08622424",
"title": "Coupled IGMM-GANs for improved generative adversarial anomaly detection",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2018/08622424/17D45Vu1Ty9",
"parentPublication": {
"id": "proceedings/big-data/2018/5035/0",
"title": "2018 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545381",
"title": "Anomaly Detection via Minimum Likelihood Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545381/17D45XcttmW",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2018/9159/0/08594897",
"title": "Adversarially Learned Anomaly Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2018/08594897/17D45XeKgtO",
"parentPublication": {
"id": "proceedings/icdm/2018/9159/0",
"title": "2018 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671994",
"title": "Anomaly detection of high-dimensional sparse data based on Ensemble Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671994/1A8hbtEBZS0",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/06/09965739",
"title": "Supervised Anomaly Detection via Conditional Generative Adversarial Network and Ensemble Active Learning",
"doi": null,
"abstractUrl": "/journal/tp/2023/06/09965739/1IHMNRyHTMs",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0/199300c324",
"title": "Unsupervised Anomaly Detection for IoT Data based on Robust Adversarial Learning",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2022/199300c324/1LSPAhUqif6",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0",
"title": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2019/4896/0/489600b047",
"title": "Anomaly Detection in Time Series using Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2019/489600b047/1gAwYGpiiB2",
"parentPublication": {
"id": "proceedings/icdmw/2019/4896/0",
"title": "2019 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaice/2020/9146/0/914600a330",
"title": "..Dis-AE-LSTM: Generative Adversarial Networks for Anomaly Detection of Time Series Data",
"doi": null,
"abstractUrl": "/proceedings-article/icaice/2020/914600a330/1rCg88k5Oh2",
"parentPublication": {
"id": "proceedings/icaice/2020/9146/0",
"title": "2020 International Conference on Artificial Intelligence and Computer Engineering (ICAICE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378139",
"title": "TadGAN: Time Series Anomaly Detection Using Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378139/1s64CPQKkA8",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09618824",
"title": "GAN-Based Anomaly Detection for Multivariate Time Series Using Polluted Training Set",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09618824/1yBALP5y0Xm",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCfAPCa",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "2001",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAoDilO",
"doi": "10.1109/VISUAL.2001.964531",
"title": "Real-Time Decompression and Visualization of Animated Volume Data",
"normalizedTitle": "Real-Time Decompression and Visualization of Animated Volume Data",
"abstract": "Interactive exploration of animated volume data is required by many application, but the huge amount of computational time and storage space needed for rendering does not allow the visualization of animated volumes by now. In this paper we introduce an algorithm running at interactive frame rates using 3d wavelet transforms that allows for any wavelet, motion compensation techniques and various encoding schemes of the resulting wavelet coefficients to be used. We analyze different families and orders of wavelets for compression ratio and the introduced error. We use a quantization that has been optimized for the visual impression of the reconstructed volume independent of the viewing. This enables us to achieve very high compression ratios while still being able to reconstruct the volume with as few visual artifacts as possible. A further improvement of the compression ratio has been achieved by applying a motion compensation scheme to exploit temporal coherency. Using these scheme we are capable of decompressing each volume of our animation at interactive frame rates, while visualizing these decompressed volumes on a single PC. We also present a number of improved visualization algorithms for high quality display using OpenGL hardware running at interactive frame rates on a standard PC.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Interactive exploration of animated volume data is required by many application, but the huge amount of computational time and storage space needed for rendering does not allow the visualization of animated volumes by now. In this paper we introduce an algorithm running at interactive frame rates using 3d wavelet transforms that allows for any wavelet, motion compensation techniques and various encoding schemes of the resulting wavelet coefficients to be used. We analyze different families and orders of wavelets for compression ratio and the introduced error. We use a quantization that has been optimized for the visual impression of the reconstructed volume independent of the viewing. This enables us to achieve very high compression ratios while still being able to reconstruct the volume with as few visual artifacts as possible. A further improvement of the compression ratio has been achieved by applying a motion compensation scheme to exploit temporal coherency. Using these scheme we are capable of decompressing each volume of our animation at interactive frame rates, while visualizing these decompressed volumes on a single PC. We also present a number of improved visualization algorithms for high quality display using OpenGL hardware running at interactive frame rates on a standard PC.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Interactive exploration of animated volume data is required by many application, but the huge amount of computational time and storage space needed for rendering does not allow the visualization of animated volumes by now. In this paper we introduce an algorithm running at interactive frame rates using 3d wavelet transforms that allows for any wavelet, motion compensation techniques and various encoding schemes of the resulting wavelet coefficients to be used. We analyze different families and orders of wavelets for compression ratio and the introduced error. We use a quantization that has been optimized for the visual impression of the reconstructed volume independent of the viewing. This enables us to achieve very high compression ratios while still being able to reconstruct the volume with as few visual artifacts as possible. A further improvement of the compression ratio has been achieved by applying a motion compensation scheme to exploit temporal coherency. Using these scheme we are capable of decompressing each volume of our animation at interactive frame rates, while visualizing these decompressed volumes on a single PC. We also present a number of improved visualization algorithms for high quality display using OpenGL hardware running at interactive frame rates on a standard PC.",
"fno": "7200guthe",
"keywords": [
"Time Critical Visualization",
"Compression For Visualization",
"Volume Rendering"
],
"authors": [
{
"affiliation": "WSI/GRIS University of Tübingen",
"fullName": "Stefan Guthe",
"givenName": "Stefan",
"surname": "Guthe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "WSI/GRIS University of Tübingen",
"fullName": "Wolfgang Straßer",
"givenName": "Wolfgang",
"surname": "Straßer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2001-10-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2001",
"issn": "1070-2385",
"isbn": "0-7803-7200-X",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7200haber",
"articleId": "12OmNwp74Fa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7200ho",
"articleId": "12OmNz5s0IG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzUPpyX",
"title": "2010 Data Compression Conference (DCC 2010)",
"acronym": "dcc",
"groupId": "1000177",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBOUxkQ",
"doi": "10.1109/DCC.2010.50",
"title": "Depth Compression of 3D Object Represented by Layered Depth Image",
"normalizedTitle": "Depth Compression of 3D Object Represented by Layered Depth Image",
"abstract": "A Layered Depth Image (LDI) is one of the popular representation and rendering methods for 3D objects with complex geometries. In this paper, we propose the new compression algorithm for depth information of a 3D object represented by LDI. For the purpose, we introduce the concept of partial surfaces to seek highly correlated depth data irrespective of their layer and derive a depth compression algorithm by using them. Partial surfaces are approximated by a Bézier patch and residual information is encoded by a shape-adaptive transform. Experimental results show that our proposed compression method achieves a better compression performance than any other previous methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A Layered Depth Image (LDI) is one of the popular representation and rendering methods for 3D objects with complex geometries. In this paper, we propose the new compression algorithm for depth information of a 3D object represented by LDI. For the purpose, we introduce the concept of partial surfaces to seek highly correlated depth data irrespective of their layer and derive a depth compression algorithm by using them. Partial surfaces are approximated by a Bézier patch and residual information is encoded by a shape-adaptive transform. Experimental results show that our proposed compression method achieves a better compression performance than any other previous methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A Layered Depth Image (LDI) is one of the popular representation and rendering methods for 3D objects with complex geometries. In this paper, we propose the new compression algorithm for depth information of a 3D object represented by LDI. For the purpose, we introduce the concept of partial surfaces to seek highly correlated depth data irrespective of their layer and derive a depth compression algorithm by using them. Partial surfaces are approximated by a Bézier patch and residual information is encoded by a shape-adaptive transform. Experimental results show that our proposed compression method achieves a better compression performance than any other previous methods.",
"fno": "05453470",
"keywords": [
"Data Compression",
"Image Coding",
"Rendering Computer Graphics",
"3 D Object Depth Compression Algorithms",
"Layered Depth Image",
"LDI",
"3 D Objects Rendering Methods",
"Complex Geometries",
"LDI",
"Bezier Patch",
"Residual Information",
"Shape Adaptive Transform",
"Image Coding",
"Rendering Computer Graphics",
"Layout",
"Data Mining",
"Compression Algorithms",
"Solid Modeling",
"Data Structures",
"Image Generation",
"Data Compression",
"Geometry"
],
"authors": [
{
"affiliation": null,
"fullName": "Sang-Young Park",
"givenName": "Sang-Young",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Seong-Dae Kim",
"givenName": "Seong-Dae",
"surname": "Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-03-01T00:00:00",
"pubType": "proceedings",
"pages": "504-513",
"year": "2010",
"issn": "1068-0314",
"isbn": "978-1-4244-6425-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05453471",
"articleId": "12OmNyXMQgP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05453473",
"articleId": "12OmNxwENLq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dcc/2001/1031/0/10310331",
"title": "Compression of the Layered Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2001/10310331/12OmNB1wkNe",
"parentPublication": {
"id": "proceedings/dcc/2001/1031/0",
"title": "Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2011/279/0/05749524",
"title": "Rendering Lossless Compression of Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2011/05749524/12OmNqzcvHl",
"parentPublication": {
"id": "proceedings/dcc/2011/279/0",
"title": "2011 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mue/2008/3134/0/3134a470",
"title": "Reconfigurable Depth Buffer Compression Design for 3D Graphics System",
"doi": null,
"abstractUrl": "/proceedings-article/mue/2008/3134a470/12OmNrJROVJ",
"parentPublication": {
"id": "proceedings/mue/2008/3134/0",
"title": "Multimedia and Ubiquitous Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890645",
"title": "Sleep monitoring via depth video compression & analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890645/12OmNvEhfZX",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011969",
"title": "Efficient depth map compression exploiting segmented color data",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011969/12OmNvF83qI",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2013/5099/0/5099a123",
"title": "Explorable Volumetric Depth Images from Raycasting",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2013/5099a123/12OmNwBT1oL",
"parentPublication": {
"id": "proceedings/sibgrapi/2013/5099/0",
"title": "2013 XXVI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1998/9176/0/91760211",
"title": "Efficient Warping for Architectural Walkthroughs Using Layered Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1998/91760211/12OmNwekjHc",
"parentPublication": {
"id": "proceedings/ieee-vis/1998/9176/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607553",
"title": "Design of multi-mode depth buffer compression for 3D graphics system",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607553/12OmNxWcHhY",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/03/v0265",
"title": "A Method to Generate Soft Shadows Using a Layered Depth Image and Warping",
"doi": null,
"abstractUrl": "/journal/tg/2005/03/v0265/13rRUwjoNwO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i025",
"title": "3D Photography Using Context-Aware Layered Depth Inpainting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i025/1m3opTjsjNC",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBSBk6z",
"title": "2014 5th International Conference on Computing, Communication and Networking Technologies (ICCCNT)",
"acronym": "icccnt",
"groupId": "1802177",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBuL1h7",
"doi": "10.1109/ICCCNT.2014.6963150",
"title": "Reprojection of textured depth map for network rendering",
"normalizedTitle": "Reprojection of textured depth map for network rendering",
"abstract": "Rendering over the network is widely used in visualization related applications. In this paper, a novel network rendering method for large-scale digital models is proposed. It uses an extension of textured depth map, called Screen Instance Map (SIM) that stores sparse server rendered results and rebuilds in-between frames in the client. The server side is only responsible for the creation of SIMs. A cache module, which is used to manage the SIMs, is introduced to improve the performance of the client. The proposed method can be applied to arbitrary models and can be easily extended to various rendering methods. Experiment results show that the implemented system can provide interactive frame rate for large-scale models. Compared with existing solutions, this method needs no additional meshes or keeping viewing history for each client in the server.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Rendering over the network is widely used in visualization related applications. In this paper, a novel network rendering method for large-scale digital models is proposed. It uses an extension of textured depth map, called Screen Instance Map (SIM) that stores sparse server rendered results and rebuilds in-between frames in the client. The server side is only responsible for the creation of SIMs. A cache module, which is used to manage the SIMs, is introduced to improve the performance of the client. The proposed method can be applied to arbitrary models and can be easily extended to various rendering methods. Experiment results show that the implemented system can provide interactive frame rate for large-scale models. Compared with existing solutions, this method needs no additional meshes or keeping viewing history for each client in the server.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Rendering over the network is widely used in visualization related applications. In this paper, a novel network rendering method for large-scale digital models is proposed. It uses an extension of textured depth map, called Screen Instance Map (SIM) that stores sparse server rendered results and rebuilds in-between frames in the client. The server side is only responsible for the creation of SIMs. A cache module, which is used to manage the SIMs, is introduced to improve the performance of the client. The proposed method can be applied to arbitrary models and can be easily extended to various rendering methods. Experiment results show that the implemented system can provide interactive frame rate for large-scale models. Compared with existing solutions, this method needs no additional meshes or keeping viewing history for each client in the server.",
"fno": "06963150",
"keywords": [
"Rendering Computer Graphics",
"Servers",
"Computational Modeling",
"History",
"Pipelines",
"Adaptation Models",
"Image Reconstruction",
"Textured Depth Map",
"Graphics Systems",
"Network Graphics",
"Image Based Rendering"
],
"authors": [
{
"affiliation": "Department of Computer Science, The University of Hong Kong",
"fullName": "Li Cao",
"givenName": "Li",
"surname": "Cao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, The University of Hong Kong",
"fullName": "Zhan Yuan",
"givenName": "Zhan",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, The University of Hong Kong",
"fullName": "Bin Chan",
"givenName": "Bin",
"surname": "Chan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, The University of Hong Kong",
"fullName": "Wenping Wang",
"givenName": "Wenping",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icccnt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-2696-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06963149",
"articleId": "12OmNwpGgM6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06963151",
"articleId": "12OmNynsbvM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2009/4442/0/05457602",
"title": "Image-based network rendering system for large sized meshes",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457602/12OmNBTJIJ7",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/2000/6478/0/00885713",
"title": "Multi-user view-dependent rendering",
"doi": null,
"abstractUrl": "/proceedings-article/visual/2000/00885713/12OmNBlXs8t",
"parentPublication": {
"id": "proceedings/visual/2000/6478/0",
"title": "Proceedings Visualization 2000. VIS 2000",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icws/2016/2675/0/2675a602",
"title": "A Hybrid Web Rendering Framework on Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/icws/2016/2675a602/12OmNvjyxVz",
"parentPublication": {
"id": "proceedings/icws/2016/2675/0",
"title": "2016 IEEE International Conference on Web Services (ICWS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2017/0560/0/08026271",
"title": "Distributed rendering: Interaction delay reduction in remote rendering with client-end GPU-accelerated scene warping technique",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2017/08026271/12OmNwIpNk0",
"parentPublication": {
"id": "proceedings/icmew/2017/0560/0",
"title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/4/3571d259",
"title": "Contact Elements Prediction Based Haptic Rendering Method for Collaborative Virtual Assembly System",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571d259/12OmNwJybQW",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/4",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v1307",
"title": "Progressive Volume Rendering of Large Unstructured Grids",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v1307/13rRUwfZC05",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/05/06727579",
"title": "Hybrid Rendering with Scheduling under Uncertainty",
"doi": null,
"abstractUrl": "/journal/tg/2014/05/06727579/13rRUwjGoG3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/03/ttg2008030576",
"title": "Interactive View-Dependent Rendering over Networks",
"doi": null,
"abstractUrl": "/journal/tg/2008/03/ttg2008030576/13rRUxC0SOT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit/2018/0385/0/08672700",
"title": "Real-Time Heterogeneous Volume Modelling and Rendering Environment",
"doi": null,
"abstractUrl": "/proceedings-article/acit/2018/08672700/18IpiJRLEFW",
"parentPublication": {
"id": "proceedings/acit/2018/0385/0",
"title": "2018 International Arab Conference on Information Technology (ACIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcsw/2022/8879/0/887900a209",
"title": "Distributed Rendering for Video Games via Object Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icdcsw/2022/887900a209/1IFJA0En3ji",
"parentPublication": {
"id": "proceedings/icdcsw/2022/8879/0",
"title": "2022 IEEE 42nd International Conference on Distributed Computing Systems Workshops (ICDCSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNz4BdvV",
"title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCvcLKq",
"doi": "10.1109/ICMEW.2012.112",
"title": "Improving Depth Compression in HEVC by Pre/Post Processing",
"normalizedTitle": "Improving Depth Compression in HEVC by Pre/Post Processing",
"abstract": "Depth images have different characteristics from that of color images. They usually have gradual changes within objects while steep changes happen around object boundaries. Compression standards such as H.264/AVC and High Efficiency Video Coding (HEVC) are efficient in dealing with the gradual change regions but usually result in poor performance at edge regions. To facilitate the reuse of the current video coding design and to further improve the depth compression performance, we propose a pre/post processing based compression strategy. By modifying the edge blocks in the depth image to flat blocks, the pre-processed image can be efficiently compressed using existing compression schemes. Meanwhile, those edge blocks are compressed by an edge preserving codec separately. At the decoder, the decoded modified image and the edge blocks will be merged together to form the final reconstructed image. In our simulations, we implement this strategy to HEVC to evaluate the coding performance. Experimental results show the proposed scheme can achieve about 30% - 40% bit savings for Ballet and Break dancers sequences and 60% - 70% bit savings for Kinect captured depth sequences in comparison with HEVC.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Depth images have different characteristics from that of color images. They usually have gradual changes within objects while steep changes happen around object boundaries. Compression standards such as H.264/AVC and High Efficiency Video Coding (HEVC) are efficient in dealing with the gradual change regions but usually result in poor performance at edge regions. To facilitate the reuse of the current video coding design and to further improve the depth compression performance, we propose a pre/post processing based compression strategy. By modifying the edge blocks in the depth image to flat blocks, the pre-processed image can be efficiently compressed using existing compression schemes. Meanwhile, those edge blocks are compressed by an edge preserving codec separately. At the decoder, the decoded modified image and the edge blocks will be merged together to form the final reconstructed image. In our simulations, we implement this strategy to HEVC to evaluate the coding performance. Experimental results show the proposed scheme can achieve about 30% - 40% bit savings for Ballet and Break dancers sequences and 60% - 70% bit savings for Kinect captured depth sequences in comparison with HEVC.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Depth images have different characteristics from that of color images. They usually have gradual changes within objects while steep changes happen around object boundaries. Compression standards such as H.264/AVC and High Efficiency Video Coding (HEVC) are efficient in dealing with the gradual change regions but usually result in poor performance at edge regions. To facilitate the reuse of the current video coding design and to further improve the depth compression performance, we propose a pre/post processing based compression strategy. By modifying the edge blocks in the depth image to flat blocks, the pre-processed image can be efficiently compressed using existing compression schemes. Meanwhile, those edge blocks are compressed by an edge preserving codec separately. At the decoder, the decoded modified image and the edge blocks will be merged together to form the final reconstructed image. In our simulations, we implement this strategy to HEVC to evaluate the coding performance. Experimental results show the proposed scheme can achieve about 30% - 40% bit savings for Ballet and Break dancers sequences and 60% - 70% bit savings for Kinect captured depth sequences in comparison with HEVC.",
"fno": "06266453",
"keywords": [
"Data Compression",
"Image Sequences",
"Video Coding",
"Depth Compression",
"HEVC",
"High Efficiency Video Coding",
"Pre Post Processing",
"Depth Images",
"Color Images",
"Object Boundaries",
"Compression Standards",
"Pre Processed Image",
"Ballet And Break Dancers Sequences",
"Kinect Captured Depth Sequences",
"Image Edge Detection",
"Image Coding",
"Encoding",
"Decoding",
"Codecs",
"Image Reconstruction",
"Rate Distortion",
"Depth Image",
"Pre Post Processing",
"Compression",
"HEVC",
"Kinect"
],
"authors": [
{
"affiliation": null,
"fullName": "Cuiling Lan",
"givenName": "Cuiling",
"surname": "Lan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jizheng Xu",
"givenName": "Jizheng",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Feng Wu",
"givenName": "Feng",
"surname": "Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-07-01T00:00:00",
"pubType": "proceedings",
"pages": "611-616",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-2027-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06266452",
"articleId": "12OmNs59JV0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06266454",
"articleId": "12OmNx5Yv7t",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2017/0560/0/08026221",
"title": "Multi-Intensity Illuminated Infrared video compression using MV-HEVC and 3D-HEVC",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2017/08026221/12OmNAoUT8b",
"parentPublication": {
"id": "proceedings/icmew/2017/0560/0",
"title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169808",
"title": "Hardware-oriented rate-distortion optimization algorithm for HEVC intra-frame encoder",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169808/12OmNBhZ4hA",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786172",
"title": "Compression Efficiency Improvement over HEVC Main 10 Profile for HDR and WCG Content",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786172/12OmNqBbHFo",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890587",
"title": "Sample edge offset compensation for HEVC based 3D Video Coding",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890587/12OmNqFrGxg",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607535",
"title": "Intensity dependent spatial quantization with application in HEVC",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607535/12OmNqN6QXF",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786188",
"title": "Optimizing Subjective Quality in HEVC-MSP: An Approximate Closed-form Image Compression Approach",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786188/12OmNzAFSX7",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786253",
"title": "Just Noticeable Difference Based Fast Coding Unit Partition in 3D-HEVC Intra Coding",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786253/12OmNzZEAp6",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2023/01/09684978",
"title": "Adaptive HEVC Steganography Based on Steganographic Compression Efficiency Degradation Model",
"doi": null,
"abstractUrl": "/journal/tq/2023/01/09684978/1Ai9zGHz6X6",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102958",
"title": "Enhanced Cu Partitioning Search Method for Intra Coding in HEVC",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102958/1kwqUmArJvy",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428069",
"title": "Cnn-Based Depth Map Prediction for Fast Block Partitioning in HEVC Intra Coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428069/1uilXBlRVfO",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyxXltg",
"title": "2011 Data Compression Conference (DCC)",
"acronym": "dcc",
"groupId": "1000177",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqzcvHl",
"doi": "10.1109/DCC.2011.71",
"title": "Rendering Lossless Compression of Depth Image",
"normalizedTitle": "Rendering Lossless Compression of Depth Image",
"abstract": "Summary form only given. In this work, we experimented on the compression efficiency of rendering lossless compression of depth images and found that the compression ratios can go up to 20.51 and 40 for Interview and Breakdancer test images, respectively, even if the parameter setting is in the worst case (i.e., set f_density(Z_near, Z_far) to its maximum value). This work is our first step toward exploring the performance of rendering lossless compression. It is our belief that, besides the rendering lossless quantization, there are a lot of different issues of depth image compression which are worthy of further exploitation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Summary form only given. In this work, we experimented on the compression efficiency of rendering lossless compression of depth images and found that the compression ratios can go up to 20.51 and 40 for Interview and Breakdancer test images, respectively, even if the parameter setting is in the worst case (i.e., set f_density(Z_near, Z_far) to its maximum value). This work is our first step toward exploring the performance of rendering lossless compression. It is our belief that, besides the rendering lossless quantization, there are a lot of different issues of depth image compression which are worthy of further exploitation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Summary form only given. In this work, we experimented on the compression efficiency of rendering lossless compression of depth images and found that the compression ratios can go up to 20.51 and 40 for Interview and Breakdancer test images, respectively, even if the parameter setting is in the worst case (i.e., set f_density(Z_near, Z_far) to its maximum value). This work is our first step toward exploring the performance of rendering lossless compression. It is our belief that, besides the rendering lossless quantization, there are a lot of different issues of depth image compression which are worthy of further exploitation.",
"fno": "05749524",
"keywords": [
"Data Compression",
"Image Coding",
"Rendering Computer Graphics",
"Depth Image Lossless Compression",
"Lossless Compression Rendering",
"Lossless Quantization Rendering",
"Rendering Computer Graphics",
"Image Coding",
"Three Dimensional Displays",
"Quantization",
"Color",
"Image Color Analysis",
"Multimedia Communication",
"Color Plus Depth",
"DIBR",
"Rendering Lossless",
"JPEG LS",
"Lossless Compression"
],
"authors": [
{
"affiliation": null,
"fullName": "Yu-Hsun Lin",
"givenName": "Yu-Hsun",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ja-Ling Wu",
"givenName": "Ja-Ling",
"surname": "Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-03-01T00:00:00",
"pubType": "proceedings",
"pages": "467-467",
"year": "2011",
"issn": "1068-0314",
"isbn": "978-1-61284-279-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05749523",
"articleId": "12OmNzw8iZ1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05749525",
"articleId": "12OmNrYlmOq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2013/1604/0/06618363",
"title": "Efficient depth map compression exploiting correlation with texture data in multiresolution predictive image coders",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2013/06618363/12OmNAKcNKT",
"parentPublication": {
"id": "proceedings/icmew/2013/1604/0",
"title": "2013 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150988",
"title": "Image compression using integrated lossless/lossy methods",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150988/12OmNAlvI9N",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1995/7012/0/00515603",
"title": "Lossless compression using conditional entropy-constrained subband quantization",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1995/00515603/12OmNAq3hOx",
"parentPublication": {
"id": "proceedings/dcc/1995/7012/0",
"title": "Proceedings DCC '95 Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2010/6425/0/05453470",
"title": "Depth Compression of 3D Object Represented by Layered Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2010/05453470/12OmNBOUxkQ",
"parentPublication": {
"id": "proceedings/dcc/2010/6425/0",
"title": "2010 Data Compression Conference (DCC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b073",
"title": "Superpixel-Based Disocclusion Filling in Depth Image Based Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b073/12OmNBqdr4j",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2016/7258/0/07552984",
"title": "Lossless depth map coding using binary tree based decomposition and context-based arithmetic coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2016/07552984/12OmNBr4exY",
"parentPublication": {
"id": "proceedings/icme/2016/7258/0",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1995/7012/0/70120457",
"title": "Asymmetric lossless image compression",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1995/70120457/12OmNCgrCW3",
"parentPublication": {
"id": "proceedings/dcc/1995/7012/0",
"title": "Proceedings DCC '95 Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/3/73103097",
"title": "An asymmetric lossless image compression technique",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73103097/12OmNwErpWP",
"parentPublication": {
"id": "proceedings/icip/1995/7310/3",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815030",
"title": "Depth-of-Field Rendering with Saliency-Based Bilateral Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815030/12OmNz5JC5F",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1997/7761/0/00582100",
"title": "Perceptually lossless image compression",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1997/00582100/1dUnbYA1ur6",
"parentPublication": {
"id": "proceedings/dcc/1997/7761/0",
"title": "Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAkEU4f",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvF83qI",
"doi": "10.1109/ICME.2011.6011969",
"title": "Efficient depth map compression exploiting segmented color data",
"normalizedTitle": "Efficient depth map compression exploiting segmented color data",
"abstract": "3D video representations usually associate to each view a depth map with the corresponding geometric information. Many compression schemes have been proposed for multi-view video and for depth data, but the exploitation of the correlation between the two representations to enhance compression performances is still an open research issue. This paper presents a novel compression scheme that exploits a segmentation of the color data to predict the shape of the different surfaces in the depth map. Then each segment is approximated with a parameterized plane. In case the approximation is sufficiently accurate for the target bit rate, the surface coefficients are compressed and transmitted. Otherwise, the region is coded using a standard H.264/AVC Intra coder. Experimental results show that the proposed scheme permits to outperform the standard H.264/AVC Intra codec on depth data and can be effectively included into multi-view plus depth compression schemes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D video representations usually associate to each view a depth map with the corresponding geometric information. Many compression schemes have been proposed for multi-view video and for depth data, but the exploitation of the correlation between the two representations to enhance compression performances is still an open research issue. This paper presents a novel compression scheme that exploits a segmentation of the color data to predict the shape of the different surfaces in the depth map. Then each segment is approximated with a parameterized plane. In case the approximation is sufficiently accurate for the target bit rate, the surface coefficients are compressed and transmitted. Otherwise, the region is coded using a standard H.264/AVC Intra coder. Experimental results show that the proposed scheme permits to outperform the standard H.264/AVC Intra codec on depth data and can be effectively included into multi-view plus depth compression schemes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D video representations usually associate to each view a depth map with the corresponding geometric information. Many compression schemes have been proposed for multi-view video and for depth data, but the exploitation of the correlation between the two representations to enhance compression performances is still an open research issue. This paper presents a novel compression scheme that exploits a segmentation of the color data to predict the shape of the different surfaces in the depth map. Then each segment is approximated with a parameterized plane. In case the approximation is sufficiently accurate for the target bit rate, the surface coefficients are compressed and transmitted. Otherwise, the region is coded using a standard H.264/AVC Intra coder. Experimental results show that the proposed scheme permits to outperform the standard H.264/AVC Intra codec on depth data and can be effectively included into multi-view plus depth compression schemes.",
"fno": "06011969",
"keywords": [
"Image Segmentation",
"Encoding",
"Image Coding",
"PSNR",
"Bit Rate",
"Image Color Analysis",
"Approximation Methods",
"Depth Map Coding",
"Image Segmentation",
"H 264 AVC"
],
"authors": [
{
"affiliation": "University of Padova, Department of Information Engineering, Via Gradenigo 6B, 35131, Italy",
"fullName": "Simone Milani",
"givenName": "Simone",
"surname": "Milani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Padova, Department of Information Engineering, Via Gradenigo 6B, 35131, Italy",
"fullName": "Pietro Zanuttigh",
"givenName": "Pietro",
"surname": "Zanuttigh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Denmark, Department of Photonics Engineering, Ørsteds Plads B343, 2800 Kgs. Lyngby, Denmark",
"fullName": "Marco Zamarin",
"givenName": "Marco",
"surname": "Zamarin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Denmark, Department of Photonics Engineering, Ørsteds Plads B343, 2800 Kgs. Lyngby, Denmark",
"fullName": "Søren Forchhammer",
"givenName": "Søren",
"surname": "Forchhammer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2011",
"issn": "1945-7871",
"isbn": "978-1-61284-348-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06011949",
"articleId": "12OmNqJ8tiK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06011970",
"articleId": "12OmNqFJhV2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2016/1552/0/07574704",
"title": "A depth map rate control algorithm for HEVC Multi-View Video plus Depth",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2016/07574704/12OmNAY79kD",
"parentPublication": {
"id": "proceedings/icmew/2016/1552/0",
"title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607516",
"title": "Edge-preserving intra depth coding based on context-coding and H.264/AVC",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607516/12OmNBKW9vF",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2014/5179/0/06850804",
"title": "Computational complexity analysis of H.264/AVC video compression by DC mode prediction",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2014/06850804/12OmNCvcLJO",
"parentPublication": {
"id": "proceedings/iciev/2014/5179/0",
"title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2012/2027/0/06266453",
"title": "Improving Depth Compression in HEVC by Pre/Post Processing",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2012/06266453/12OmNCvcLKq",
"parentPublication": {
"id": "proceedings/icmew/2012/2027/0",
"title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2009/3762/0/3762a312",
"title": "H.264/AVC-Based Depth Map Sequence Coding Using Improved Loop-filter",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2009/3762a312/12OmNvwC5tM",
"parentPublication": {
"id": "proceedings/iih-msp/2009/3762/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607620",
"title": "Removing depth map coding distortion by using post filter set",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607620/12OmNx1Iw9T",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2015/0379/0/0379a282",
"title": "Characterization of the HEVC Coding Efficiency Advance Using 20 Scenes, ITU-T Rec. P.913 Compliant Subjective Methods, VQM, and PSNR",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2015/0379a282/12OmNx4gUth",
"parentPublication": {
"id": "proceedings/ism/2015/0379/0",
"title": "2015 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imis/2014/4331/0/4331a202",
"title": "Efficient Compression Algorithm for H.264/AVC Intra-prediction Using an Activity Map",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2014/4331a202/12OmNzGlRBw",
"parentPublication": {
"id": "proceedings/imis/2014/4331/0",
"title": "2014 Eighth International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607533",
"title": "Improved inter mode decision based on residue in H.264/AVC",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607533/12OmNzVoBFL",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2021/4989/0/09455977",
"title": "Depth Map Video Compression Performance Evaluation For Ieee 1857.9",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2021/09455977/1uCgnl3Kw6I",
"parentPublication": {
"id": "proceedings/icmew/2021/4989/0",
"title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNz4BdvV",
"title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwAt1FC",
"doi": "10.1109/ICMEW.2012.110",
"title": "Kinect-Like Depth Compression with 2D+T Prediction",
"normalizedTitle": "Kinect-Like Depth Compression with 2D+T Prediction",
"abstract": "The Kinect-like depth compression becomes increasingly important due to the growing requirement on Kinect depth data transmission and storage. Considering the temporal inconsistency of Kinect depth introduced by the random depth measurement error, we propose 2D+T prediction algorithm aiming at fully exploiting the temporal depth correlation to enhance the Kinect depth compression efficiency. In our 2D+T prediction, each depth block is treated as a subsurface, and its motion trend is detected by comparing with the reliable 3D reconstruction surface, which is integrated by accumulated depth information stored in depth volume. The comparison is implemented under the error tolerant rule, which is derived from the depth error model. The experimental results demonstrate our algorithm can remarkably reduce the bitrate cost and the compression complexity. And the visual quality of the 3D reconstruction results generated from our reconstructed depth is similar to that of traditional video compression algorithm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Kinect-like depth compression becomes increasingly important due to the growing requirement on Kinect depth data transmission and storage. Considering the temporal inconsistency of Kinect depth introduced by the random depth measurement error, we propose 2D+T prediction algorithm aiming at fully exploiting the temporal depth correlation to enhance the Kinect depth compression efficiency. In our 2D+T prediction, each depth block is treated as a subsurface, and its motion trend is detected by comparing with the reliable 3D reconstruction surface, which is integrated by accumulated depth information stored in depth volume. The comparison is implemented under the error tolerant rule, which is derived from the depth error model. The experimental results demonstrate our algorithm can remarkably reduce the bitrate cost and the compression complexity. And the visual quality of the 3D reconstruction results generated from our reconstructed depth is similar to that of traditional video compression algorithm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Kinect-like depth compression becomes increasingly important due to the growing requirement on Kinect depth data transmission and storage. Considering the temporal inconsistency of Kinect depth introduced by the random depth measurement error, we propose 2D+T prediction algorithm aiming at fully exploiting the temporal depth correlation to enhance the Kinect depth compression efficiency. In our 2D+T prediction, each depth block is treated as a subsurface, and its motion trend is detected by comparing with the reliable 3D reconstruction surface, which is integrated by accumulated depth information stored in depth volume. The comparison is implemented under the error tolerant rule, which is derived from the depth error model. The experimental results demonstrate our algorithm can remarkably reduce the bitrate cost and the compression complexity. And the visual quality of the 3D reconstruction results generated from our reconstructed depth is similar to that of traditional video compression algorithm.",
"fno": "06266451",
"keywords": [
"Correlation Methods",
"Data Compression",
"Image Motion Analysis",
"Image Reconstruction",
"Video Coding",
"Kinect Like Depth Compression",
"Kinect Depth Data Transmission",
"Data Storage",
"Random Depth Measurement Error",
"2 D T Prediction Algorithm",
"Temporal Depth Correlation",
"Kinect Depth Compression Efficiency",
"Depth Block",
"Motion Trend",
"3 D Reconstruction Surface",
"Depth Volume",
"Error Tolerant Rule",
"Depth Error Model",
"Bitrate Cost",
"Compression Complexity",
"Visual Quality",
"Video Compression Algorithm",
"Encoding",
"Image Reconstruction",
"Radiation Detectors",
"Surface Reconstruction",
"Prediction Algorithms",
"Bit Rate",
"Cameras",
"Kinect Like Depth",
"Lossy Compression",
"2 D T Prediction",
"Depth Volume"
],
"authors": [
{
"affiliation": null,
"fullName": "Jingjing Fu",
"givenName": "Jingjing",
"surname": "Fu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dan Miao",
"givenName": "Dan",
"surname": "Miao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Weiren Yu",
"givenName": "Weiren",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shiqi Wang",
"givenName": "Shiqi",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yan Lu",
"givenName": "Yan",
"surname": "Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shipeng Li",
"givenName": "Shipeng",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-07-01T00:00:00",
"pubType": "proceedings",
"pages": "599-604",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-2027-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06266450",
"articleId": "12OmNARRYwy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06266452",
"articleId": "12OmNs59JV0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciibms/2015/8562/0/07439534",
"title": "Experimental results of 2D depth-depth matching algorithm based on depth camera Kinect v1",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2015/07439534/12OmNAKcNO7",
"parentPublication": {
"id": "proceedings/iciibms/2015/8562/0",
"title": "2015 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2012/2027/0/06266453",
"title": "Improving Depth Compression in HEVC by Pre/Post Processing",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2012/06266453/12OmNCvcLKq",
"parentPublication": {
"id": "proceedings/icmew/2012/2027/0",
"title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607501",
"title": "High-quality Kinect depth filtering for real-time 3D telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607501/12OmNrkBwqu",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130380",
"title": "3D with Kinect",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130380/12OmNsd6vky",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2015/8020/0/07450431",
"title": "A Recovery Method for Kinect-Like Depth Map Based on Color Image Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450431/12OmNwAKCN2",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06814995",
"title": "Dynamic Human Surface Reconstruction Using a Single Kinect",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06814995/12OmNwF0BZ5",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209c751",
"title": "Hybrid Kinect Depth Map Refinement for Transparent Objects",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c751/12OmNxveNNV",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019370",
"title": "A unified model for improving depth accuracy in kinect sensor",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019370/12OmNyXMQmz",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2016/2305/0/2305a154",
"title": "Compositing Real and Synthetic Images: Using Kinect and Fisheye Camera",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2016/2305a154/12OmNzICEP2",
"parentPublication": {
"id": "proceedings/nicoint/2016/2305/0",
"title": "2016 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvidl/2020/9481/0/948100a596",
"title": "Research on Kinect Calibration and Depth Error Compensation Based on BP Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvidl/2020/948100a596/1pbebnZ7Uje",
"parentPublication": {
"id": "proceedings/cvidl/2020/9481/0",
"title": "2020 International Conference on Computer Vision, Image and Deep Learning (CVIDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyoiYVn",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxcMSkQ",
"doi": "10.1109/ICVRV.2015.23",
"title": "Viewpoint-Predicting-Based Remote Rendering on Mobile Devices Using Multiple Depth Images",
"normalizedTitle": "Viewpoint-Predicting-Based Remote Rendering on Mobile Devices Using Multiple Depth Images",
"abstract": "In order to break through the constraint of mobile hardware resource and wireless network bandwidth, a viewpoint-predicting-based remote rendering on mobile devices using multiple depth images is proposed. First, a reference viewpoint selection mechanism is given based on user interactions at present, and then guide the generation of corresponding depth images. Secondly, an edge based depth map reconstruction method is brought into encoding and decoding in order to furthermore improve rendering efficiency meanwhile reduce total transmit data on the network. Final synthesis result is produced with the received multiple depth images using 3D image warping. The experimental results show that it not only raises the rendering quality but also reduces the quantity of transmit data, hence meets the requirement of real-time interaction on mobile devices.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In order to break through the constraint of mobile hardware resource and wireless network bandwidth, a viewpoint-predicting-based remote rendering on mobile devices using multiple depth images is proposed. First, a reference viewpoint selection mechanism is given based on user interactions at present, and then guide the generation of corresponding depth images. Secondly, an edge based depth map reconstruction method is brought into encoding and decoding in order to furthermore improve rendering efficiency meanwhile reduce total transmit data on the network. Final synthesis result is produced with the received multiple depth images using 3D image warping. The experimental results show that it not only raises the rendering quality but also reduces the quantity of transmit data, hence meets the requirement of real-time interaction on mobile devices.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In order to break through the constraint of mobile hardware resource and wireless network bandwidth, a viewpoint-predicting-based remote rendering on mobile devices using multiple depth images is proposed. First, a reference viewpoint selection mechanism is given based on user interactions at present, and then guide the generation of corresponding depth images. Secondly, an edge based depth map reconstruction method is brought into encoding and decoding in order to furthermore improve rendering efficiency meanwhile reduce total transmit data on the network. Final synthesis result is produced with the received multiple depth images using 3D image warping. The experimental results show that it not only raises the rendering quality but also reduces the quantity of transmit data, hence meets the requirement of real-time interaction on mobile devices.",
"fno": "7673a216",
"keywords": [
"Rendering Computer Graphics",
"Mobile Handsets",
"Three Dimensional Displays",
"Geometry",
"Decoding",
"Image Reconstruction",
"Mobile Communication",
"Depth Map Reconstruction",
"Mobile Remote Rendering",
"3 D Image Warping",
"Multiple Depth Images",
"Viewpoint Predicting"
],
"authors": [
{
"affiliation": null,
"fullName": "Wang Xiaochuan",
"givenName": "Wang",
"surname": "Xiaochuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Liang Xiaohui",
"givenName": "Liang",
"surname": "Xiaohui",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-10-01T00:00:00",
"pubType": "proceedings",
"pages": "216-223",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7673-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7673a209",
"articleId": "12OmNvwTGBk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7673a224",
"articleId": "12OmNAle6pQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isise/2010/4360/0/4360a117",
"title": "A Multiresolution Viewpoint Based Rendering for Large-scale Point Models",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2010/4360a117/12OmNrMHOpQ",
"parentPublication": {
"id": "proceedings/isise/2010/4360/0",
"title": "2010 Third International Symposium on Information Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459357",
"title": "A new multiview spacetime-consistent depth recovery framework for free viewpoint video rendering",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459357/12OmNy4r3SD",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815030",
"title": "Depth-of-Field Rendering with Saliency-Based Bilateral Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815030/12OmNz5JC5F",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446391",
"title": "Visual Perception of Real World Depth Map Resolution for Mixed Reality Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446391/13bd1eSlyst",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000g393",
"title": "Aperture Supervision for Monocular Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000g393/17D45WIXbNB",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2011/938/0/05766915",
"title": "Reduce latency: The key to successful interactive remote rendering systems",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2011/05766915/17D45XH89q3",
"parentPublication": {
"id": "proceedings/percomw/2011/938/0",
"title": "2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops 2011). PerCom-Workshops 2011: 2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PERCOM Workshops 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300d417",
"title": "Depth-Guided Dense Dynamic Filtering Network for Bokeh Effect Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300d417/1i5mB3Uf3gI",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f598",
"title": "A Neural Rendering Framework for Free-Viewpoint Relighting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f598/1m3nb6ySkw0",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700d802",
"title": "Viewpoint-agnostic Image Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700d802/1uqGjFoevbG",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900j416",
"title": "Space-time Neural Irradiance Fields for Free-Viewpoint Video",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900j416/1yeHJmi5mQo",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBOll8q",
"title": "2017 International Conference on Advanced Computing and Applications (ACOMP)",
"acronym": "acomp",
"groupId": "1812104",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzsrwiR",
"doi": "10.1109/ACOMP.2017.14",
"title": "Visual Cryptography of Animated GIF Image Based on XOR Operation",
"normalizedTitle": "Visual Cryptography of Animated GIF Image Based on XOR Operation",
"abstract": "This paper describes application of a secret colour image sharing scheme based on XOR operation to the animated GIF images. The scheme is performed without expanding every pixel in the secret image so that size of shared images are equal to size of the secret images. Originally the scheme supports RGB images. In this paper we apply the scheme to the animated GIF image. A GIF image consists of a color palette and a matrix which entries (pixel values) refer to the palette row. XOR operation is not performed to RGB palette of GIF image, but to the matrix. The scheme is applied to each frame of the animated GIF image. As a result, each participant has his (or her) own share, each share is an animated GIF image where the frames look like the random images. The experiment shows that the scheme could be applied to the animated GIF image well. The original animated GIF image could be reconstructed exactly.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper describes application of a secret colour image sharing scheme based on XOR operation to the animated GIF images. The scheme is performed without expanding every pixel in the secret image so that size of shared images are equal to size of the secret images. Originally the scheme supports RGB images. In this paper we apply the scheme to the animated GIF image. A GIF image consists of a color palette and a matrix which entries (pixel values) refer to the palette row. XOR operation is not performed to RGB palette of GIF image, but to the matrix. The scheme is applied to each frame of the animated GIF image. As a result, each participant has his (or her) own share, each share is an animated GIF image where the frames look like the random images. The experiment shows that the scheme could be applied to the animated GIF image well. The original animated GIF image could be reconstructed exactly.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper describes application of a secret colour image sharing scheme based on XOR operation to the animated GIF images. The scheme is performed without expanding every pixel in the secret image so that size of shared images are equal to size of the secret images. Originally the scheme supports RGB images. In this paper we apply the scheme to the animated GIF image. A GIF image consists of a color palette and a matrix which entries (pixel values) refer to the palette row. XOR operation is not performed to RGB palette of GIF image, but to the matrix. The scheme is applied to each frame of the animated GIF image. As a result, each participant has his (or her) own share, each share is an animated GIF image where the frames look like the random images. The experiment shows that the scheme could be applied to the animated GIF image well. The original animated GIF image could be reconstructed exactly.",
"fno": "0607a117",
"keywords": [
"Computer Animation",
"Cryptography",
"Image Coding",
"Image Colour Analysis",
"Animated GIF Image",
"XOR Operation",
"Visual Cryptography",
"Secret Colour Image Sharing Scheme",
"RGB Images",
"Image Reconstruction",
"Image Color Analysis",
"Cryptography",
"Color",
"Visualization",
"Complexity Theory",
"Image Resolution",
"Animated GIF Image",
"Secret Color Image Sharing",
"XOR Operation",
"Frame"
],
"authors": [
{
"affiliation": null,
"fullName": "Rinaldi Munir",
"givenName": "Rinaldi",
"surname": "Munir",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acomp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-11-01T00:00:00",
"pubType": "proceedings",
"pages": "117-121",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-0607-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0607a111",
"articleId": "12OmNzBwGLu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0607a122",
"articleId": "12OmNx19jXL",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icic/2010/7081/4/05514042",
"title": "A Co-cheating Prevention Visual Cryptography Scheme",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2010/05514042/12OmNBa2iA9",
"parentPublication": {
"id": "proceedings/icic/2010/7081/4",
"title": "2010 Third International Conference on Information and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a410",
"title": "Reference Image Based Color Correction for Multi-camera Panoramic High Resolution Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a410/12OmNBqdrcp",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2012/2310/0/06468628",
"title": "Halftone visual cryptography with color shares",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2012/06468628/12OmNwdL7pk",
"parentPublication": {
"id": "proceedings/grc/2012/2310/0",
"title": "2012 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851e641",
"title": "TGIF: A New Dataset and Benchmark on Animated GIF Description",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851e641/12OmNxWuiii",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607407",
"title": "Optimizing the capacity of distortion-freewatermarking on palette images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607407/12OmNyfdON5",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2017/0560/0/08026304",
"title": "Color me, store me, know me not: Supporting image color transfer and storage in encrypted domain over cloud",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2017/08026304/12OmNzGDsH4",
"parentPublication": {
"id": "proceedings/icmew/2017/0560/0",
"title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06671915",
"title": "Animated Depth Images for Interactive Remote Visualization of Time-Varying Data Sets",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06671915/13rRUwj7cpb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2021/9489/0/948900a459",
"title": "Probabilistic Grayscale Visual Cryptography Scheme Using Multi-Pixel Encoding",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2021/948900a459/1AUpDiKk0Io",
"parentPublication": {
"id": "proceedings/cis/2021/9489/0",
"title": "2021 17th International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300e581",
"title": "Lossy GIF Compression Using Deep Intrinsic Parameterization",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300e581/1i5mmTKrnZS",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800o4461",
"title": "GIFnets: Differentiable GIF Encoding Framework",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800o4461/1m3nV74cpMY",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAsTgXc",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCga1RM",
"doi": "10.1109/ICCVW.2011.6130447",
"title": "A pixel-based approach to template-based monocular 3D reconstruction of deformable surfaces",
"normalizedTitle": "A pixel-based approach to template-based monocular 3D reconstruction of deformable surfaces",
"abstract": "Most of the previous work on template-based deformable 3D surface reconstruction using a single view is feature-based. We propose a pixel-based formulation in a variational framework; the unknown is the surface function. The color discrepancy between the template and the deformed images is formalized as a functional of the surface function. The main difficulty in such a formulation arises when the surface self-occludes which induces discontinuities in the discrepancy measure at the self-occlusion boundary. Based on previous work on 3D rigid surface reconstruction, we rigorously formalize the visibility as a continuous functional of the surface function. It is derived in the template for visible/self-occluded regions in the deformed image. The gradient of the color discrepancy is computed with respect to the surface function. The minimization smoothly updates the surface function to fit the self-occlusion boundary. Gradient descent is initialized from feature-based 3D reconstruction. Our experimental results on simulated and real data show that during the minimization of the color discrepancy, the self-occlusion boundary of the reconstructed surface moves to its correct location in the image. We show quantitatively that in the template image, the accuracy of visible/self-occluded areas is improved to a significant extent.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Most of the previous work on template-based deformable 3D surface reconstruction using a single view is feature-based. We propose a pixel-based formulation in a variational framework; the unknown is the surface function. The color discrepancy between the template and the deformed images is formalized as a functional of the surface function. The main difficulty in such a formulation arises when the surface self-occludes which induces discontinuities in the discrepancy measure at the self-occlusion boundary. Based on previous work on 3D rigid surface reconstruction, we rigorously formalize the visibility as a continuous functional of the surface function. It is derived in the template for visible/self-occluded regions in the deformed image. The gradient of the color discrepancy is computed with respect to the surface function. The minimization smoothly updates the surface function to fit the self-occlusion boundary. Gradient descent is initialized from feature-based 3D reconstruction. Our experimental results on simulated and real data show that during the minimization of the color discrepancy, the self-occlusion boundary of the reconstructed surface moves to its correct location in the image. We show quantitatively that in the template image, the accuracy of visible/self-occluded areas is improved to a significant extent.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Most of the previous work on template-based deformable 3D surface reconstruction using a single view is feature-based. We propose a pixel-based formulation in a variational framework; the unknown is the surface function. The color discrepancy between the template and the deformed images is formalized as a functional of the surface function. The main difficulty in such a formulation arises when the surface self-occludes which induces discontinuities in the discrepancy measure at the self-occlusion boundary. Based on previous work on 3D rigid surface reconstruction, we rigorously formalize the visibility as a continuous functional of the surface function. It is derived in the template for visible/self-occluded regions in the deformed image. The gradient of the color discrepancy is computed with respect to the surface function. The minimization smoothly updates the surface function to fit the self-occlusion boundary. Gradient descent is initialized from feature-based 3D reconstruction. Our experimental results on simulated and real data show that during the minimization of the color discrepancy, the self-occlusion boundary of the reconstructed surface moves to its correct location in the image. We show quantitatively that in the template image, the accuracy of visible/self-occluded areas is improved to a significant extent.",
"fno": "06130447",
"keywords": [
"Feature Extraction",
"Gradient Methods",
"Hidden Feature Removal",
"Image Colour Analysis",
"Image Reconstruction",
"Surface Reconstruction",
"Pixel Based Approach",
"Template Based Monocular 3 D Reconstruction",
"Deformable Surfaces",
"Template Based Deformable 3 D Surface Reconstruction",
"Single View",
"Pixel Based Formulation",
"Surface Function",
"Deformed Images",
"Discrepancy Measure",
"Self Occlusion Boundary",
"3 D Rigid Surface Reconstruction",
"Continuous Functional",
"Visible Regions",
"Self Occluded Regions",
"Color Discrepancy Gradient",
"Minimization",
"Gradient Descent",
"Feature Based 3 D Reconstruction",
"Template Image",
"Surface Reconstruction",
"Three Dimensional Displays",
"Rough Surfaces",
"Surface Roughness",
"Image Color Analysis",
"Image Reconstruction",
"Shape"
],
"authors": [
{
"affiliation": "ALCoV-ISIT, Université d'Auvergne, Clermont-Ferrand, France",
"fullName": "Abed Malti",
"givenName": "Abed",
"surname": "Malti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ALCoV-ISIT, Université d'Auvergne, Clermont-Ferrand, France",
"fullName": "Adrien Bartoli",
"givenName": "Adrien",
"surname": "Bartoli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ALCoV-ISIT, Université d'Auvergne, Clermont-Ferrand, France",
"fullName": "Toby Collins",
"givenName": "Toby",
"surname": "Collins",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1650-1657",
"year": "2011",
"issn": null,
"isbn": "978-1-4673-0063-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06130446",
"articleId": "12OmNxWLTz8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06130448",
"articleId": "12OmNyrqzkO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2009/4420/0/05459403",
"title": "Template-free monocular reconstruction of deformable surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459403/12OmNqJHFsI",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420828",
"title": "Deformable Velcro surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420828/12OmNrMZpqm",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206498",
"title": "A unified model of specular and diffuse reflectance for rough, glossy surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206498/12OmNvAiShy",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109b710",
"title": "Monocular 3D Tracking of Deformable Surfaces Using Linear Programming",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109b710/12OmNwnYFWR",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/1/7000a440",
"title": "Merge2-3D: Combining Multiple Normal Maps with 3D Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a440/12OmNx8Ouv7",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a019",
"title": "Multi-view Reconstruction of Highly Specular Surfaces in Uncontrolled Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a019/12OmNynJMFo",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a333",
"title": "Tracking Deformable Surfaces That Undergo Topological Changes Using an RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a333/12OmNyugyRN",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/05/ttp2011050931",
"title": "Linear Local Models for Monocular Reconstruction of Deformable Surfaces",
"doi": null,
"abstractUrl": "/journal/tp/2011/05/ttp2011050931/13rRUB6Sq1z",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2012/06/06186734",
"title": "Monocular 3D Reconstruction of Locally Textured Surfaces",
"doi": null,
"abstractUrl": "/journal/tp/2012/06/06186734/13rRUIIVllA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/01/07539397",
"title": "Importance of Matching Physical Friction, Hardness, and Texture in Creating Realistic Haptic Virtual Surfaces",
"doi": null,
"abstractUrl": "/journal/th/2017/01/07539397/13rRUxAAT7O",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwdbV00",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrJAea0",
"doi": "10.1109/CVPR.2012.6247825",
"title": "Modulation transfer function of patch-based stereo systems",
"normalizedTitle": "Modulation transfer function of patch-based stereo systems",
"abstract": "A widely used technique to recover a 3D surface from photographs is patch-based (multi-view) stereo reconstruction. Current methods are able to reproduce fine surface details, they are however limited by the sampling density and the patch size used for reconstruction. We show that there is a systematic error in the reconstruction depending on the details in the unknown surface (frequencies) and the reconstruction resolution. For this purpose we present a theoretical analysis of patch-based depth reconstruction. We prove that our model of the reconstruction process yields a linear system, allowing us to apply the transfer (or system) function concept. We derive the modulation transfer function theoretically and validate it experimentally on synthetic examples using rendered images as well as on photographs of a 3D test target. Our analysis proves that there is a significant but predictable amplitude loss in reconstructions of fine scale details. In a first experiment on real-world data we show how this can be compensated for within the limits of noise and reconstruction accuracy by an inverse transfer function in frequency space.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A widely used technique to recover a 3D surface from photographs is patch-based (multi-view) stereo reconstruction. Current methods are able to reproduce fine surface details, they are however limited by the sampling density and the patch size used for reconstruction. We show that there is a systematic error in the reconstruction depending on the details in the unknown surface (frequencies) and the reconstruction resolution. For this purpose we present a theoretical analysis of patch-based depth reconstruction. We prove that our model of the reconstruction process yields a linear system, allowing us to apply the transfer (or system) function concept. We derive the modulation transfer function theoretically and validate it experimentally on synthetic examples using rendered images as well as on photographs of a 3D test target. Our analysis proves that there is a significant but predictable amplitude loss in reconstructions of fine scale details. In a first experiment on real-world data we show how this can be compensated for within the limits of noise and reconstruction accuracy by an inverse transfer function in frequency space.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A widely used technique to recover a 3D surface from photographs is patch-based (multi-view) stereo reconstruction. Current methods are able to reproduce fine surface details, they are however limited by the sampling density and the patch size used for reconstruction. We show that there is a systematic error in the reconstruction depending on the details in the unknown surface (frequencies) and the reconstruction resolution. For this purpose we present a theoretical analysis of patch-based depth reconstruction. We prove that our model of the reconstruction process yields a linear system, allowing us to apply the transfer (or system) function concept. We derive the modulation transfer function theoretically and validate it experimentally on synthetic examples using rendered images as well as on photographs of a 3D test target. Our analysis proves that there is a significant but predictable amplitude loss in reconstructions of fine scale details. In a first experiment on real-world data we show how this can be compensated for within the limits of noise and reconstruction accuracy by an inverse transfer function in frequency space.",
"fno": "175P2A25",
"keywords": [
"Transfer Functions",
"Image Reconstruction",
"Image Resolution",
"Image Sampling",
"Modulation",
"Stereo Image Processing",
"Inverse Transfer Function",
"Modulation Transfer Function",
"Patch Based Stereo System",
"3 D Surface Recovery",
"Photograph",
"Patch Based Multiview Stereo Reconstruction",
"Fine Surface Detail Reproduction",
"Sampling Density",
"Systematic Error",
"Reconstruction Resolution",
"Patch Based Depth Reconstruction",
"Linear System",
"Image Rendering",
"Image Reconstruction",
"Geometry",
"Surface Reconstruction",
"Transfer Functions",
"Image Resolution",
"Cameras",
"Fourier Transforms"
],
"authors": [
{
"affiliation": null,
"fullName": "M. Goesele",
"givenName": "M.",
"surname": "Goesele",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "A. Kuijper",
"givenName": "A.",
"surname": "Kuijper",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "R. Klowsky",
"givenName": "R.",
"surname": "Klowsky",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1386-1393",
"year": "2012",
"issn": "1063-6919",
"isbn": "978-1-4673-1226-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "174P2A24",
"articleId": "12OmNBlXs3n",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "176P2A26",
"articleId": "12OmNqyDjs0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/1992/2855/0/00223147",
"title": "Shape reconstruction from photometric stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223147/12OmNAWH9Gg",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/117P1C09",
"title": "Reconstruction of super-resolution lung 4D-CT using patch-based sparse representation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/117P1C09/12OmNCy2L1C",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460252",
"title": "Depth-map merging for Multi-View Stereo with high resolution images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460252/12OmNwNwzMv",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410374",
"title": "Image representation for stereo: stripes and stripe adjacency graph",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410374/12OmNy87QtQ",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1988/0862/0/00196233",
"title": "Analysis of two new stereo algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1988/00196233/12OmNyGbI6Y",
"parentPublication": {
"id": "proceedings/cvpr/1988/0862/0",
"title": "Proceedings CVPR '88: The Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1990/2057/0/00139576",
"title": "Active surface reconstruction by integrating focus, vergence, stereo, and camera calibration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1990/00139576/12OmNym2c6Y",
"parentPublication": {
"id": "proceedings/iccv/1990/2057/0",
"title": "Proceedings Third International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1997/03/i0247",
"title": "Patch-Based Stereo in a General Binocular Viewing Geometry",
"doi": null,
"abstractUrl": "/journal/tp/1997/03/i0247/13rRUILc8g1",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2010/03/ttp2010030546",
"title": "Photometric Stereo via Expectation Maximization",
"doi": null,
"abstractUrl": "/journal/tp/2010/03/ttp2010030546/13rRUxNmPF1",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2018/5321/0/08499088",
"title": "Image Reconstruction from Patch Compressive Sensing Measurements",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2018/08499088/17D45VtKiz4",
"parentPublication": {
"id": "proceedings/bigmm/2018/5321/0",
"title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000d041",
"title": "Learning Patch Reconstructability for Accelerating Multi-view Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000d041/17D45XacGjq",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC1GueH",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNweTvOy",
"doi": "",
"title": "Detecting discontinuities for surface reconstruction",
"normalizedTitle": "Detecting discontinuities for surface reconstruction",
"abstract": "Photometric stereo algorithms produce a map of normal directions from the input images. The 3D surface can be reconstructed from this normal map. Existing surface reconstruction works often assume the normal map is integrable but contaminated by small scale non-integrable noise. However, real surfaces often contain large discontinuities such as occlusion boundaries and sharp depth changes, which break the integrable assumption commonly made in many works. Here, we propose a method to detect these discontinuities by combining multiple geometric cues with trained classifiers and a simple graph optimization. The surface is then reconstructed with the guidance of these detected discontinuities. Experiments show our method outperforms existing works.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Photometric stereo algorithms produce a map of normal directions from the input images. The 3D surface can be reconstructed from this normal map. Existing surface reconstruction works often assume the normal map is integrable but contaminated by small scale non-integrable noise. However, real surfaces often contain large discontinuities such as occlusion boundaries and sharp depth changes, which break the integrable assumption commonly made in many works. Here, we propose a method to detect these discontinuities by combining multiple geometric cues with trained classifiers and a simple graph optimization. The surface is then reconstructed with the guidance of these detected discontinuities. Experiments show our method outperforms existing works.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Photometric stereo algorithms produce a map of normal directions from the input images. The 3D surface can be reconstructed from this normal map. Existing surface reconstruction works often assume the normal map is integrable but contaminated by small scale non-integrable noise. However, real surfaces often contain large discontinuities such as occlusion boundaries and sharp depth changes, which break the integrable assumption commonly made in many works. Here, we propose a method to detect these discontinuities by combining multiple geometric cues with trained classifiers and a simple graph optimization. The surface is then reconstructed with the guidance of these detected discontinuities. Experiments show our method outperforms existing works.",
"fno": "06460577",
"keywords": [
"Graph Theory",
"Image Classification",
"Image Reconstruction",
"Optimisation",
"Stereo Image Processing",
"Photometric Stereo Algorithms",
"3 D Surface Reconstruction",
"Normal Map",
"Occlusion Boundaries",
"Sharp Depth Changes",
"Multiple Geometric Cues",
"Trained Classifiers",
"Graph Optimization",
"Discontinuity Detection",
"Image Edge Detection",
"Surface Reconstruction",
"Image Reconstruction",
"Cameras",
"Shape",
"Educational Institutions",
"Face"
],
"authors": [
{
"affiliation": "Zhejiang Provincial Key Laboratory of Service Robot, College of Computer Science, Zhejiang University",
"fullName": "Yinting Wang",
"givenName": "Yinting",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang Provincial Key Laboratory of Service Robot, College of Computer Science, Zhejiang University",
"fullName": "Jiajun Bu",
"givenName": "Jiajun",
"surname": "Bu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang International Studies University",
"fullName": "Na Li",
"givenName": "Na",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang Provincial Key Laboratory of Service Robot, College of Computer Science, Zhejiang University",
"fullName": "Mingli Song",
"givenName": "Mingli",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National University of Singapore",
"fullName": "Ping Tan",
"givenName": "Ping",
"surname": "Tan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-11-01T00:00:00",
"pubType": "proceedings",
"pages": "2108-2111",
"year": "2012",
"issn": "1051-4651",
"isbn": "978-1-4673-2216-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06460576",
"articleId": "12OmNAq3hCB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06460578",
"articleId": "12OmNBWzHOE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/1988/0862/0/00196227",
"title": "Improving visible-surface reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1988/00196227/12OmNARAn8b",
"parentPublication": {
"id": "proceedings/cvpr/1988/0862/0",
"title": "Proceedings CVPR '88: The Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284991",
"title": "Modeling Surface from a Single Grayscale Image",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284991/12OmNAXPyeN",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2006/2825/0/04155754",
"title": "Beyond Silhouettes: Surface Reconstruction Using Multi-Flash Photography",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2006/04155754/12OmNBa2iEF",
"parentPublication": {
"id": "proceedings/3dpvt/2006/2825/0",
"title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1993/3870/0/00378168",
"title": "Surface discontinuities in range images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1993/00378168/12OmNrGKesm",
"parentPublication": {
"id": "proceedings/iccv/1993/3870/0",
"title": "1993 (4th) International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118c291",
"title": "Robust Surface Reconstruction via Triple Sparsity",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c291/12OmNrIaems",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1990/2057/0/00139537",
"title": "Representing surface curvature discontinuities on curved surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1990/00139537/12OmNvT2peK",
"parentPublication": {
"id": "proceedings/iccv/1990/2057/0",
"title": "Proceedings Third International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1989/1952/0/00037854",
"title": "Discontinuity preserving surface reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1989/00037854/12OmNyPQ4Q6",
"parentPublication": {
"id": "proceedings/cvpr/1989/1952/0",
"title": "1989 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457427",
"title": "Shape from depth discontinuities under orthographic projection",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457427/12OmNyvoXiU",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2910/0/00201643",
"title": "Surface reconstruction directly from binocular stereo images by multiscale-multistage regularization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00201643/12OmNzn38WG",
"parentPublication": {
"id": "proceedings/icpr/1992/2910/0",
"title": "1992 11th IAPR International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1993/05/i0492",
"title": "Edge Detection and Surface Reconstruction Using Refined Regularization",
"doi": null,
"abstractUrl": "/journal/tp/1993/05/i0492/13rRUxC0SF0",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCbU3aO",
"title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)",
"acronym": "icdar",
"groupId": "1000219",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyKJiif",
"doi": "10.1109/ICDAR.2013.88",
"title": "A Book Dewarping System by Boundary-Based 3D Surface Reconstruction",
"normalizedTitle": "A Book Dewarping System by Boundary-Based 3D Surface Reconstruction",
"abstract": "Non-contact imaging devices such as digital cameras and overhead scanners can convert hardcopy books to digital images without cutting them to individual pages. However, the captured images have distinct distortions. A book dewarping system is proposed to remove the perspective and geometric distortions automatically from single images. A book boundary model is extracted, and a 3D book surface is reconstructed. And then the horizontal and vertical metrics of each column are restored from it. Experimental results show the good dewarping and speed performance. Since no additional equipments and no restrictions to specific book layouts or contents are needed, the proposed system is very practical in real applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Non-contact imaging devices such as digital cameras and overhead scanners can convert hardcopy books to digital images without cutting them to individual pages. However, the captured images have distinct distortions. A book dewarping system is proposed to remove the perspective and geometric distortions automatically from single images. A book boundary model is extracted, and a 3D book surface is reconstructed. And then the horizontal and vertical metrics of each column are restored from it. Experimental results show the good dewarping and speed performance. Since no additional equipments and no restrictions to specific book layouts or contents are needed, the proposed system is very practical in real applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Non-contact imaging devices such as digital cameras and overhead scanners can convert hardcopy books to digital images without cutting them to individual pages. However, the captured images have distinct distortions. A book dewarping system is proposed to remove the perspective and geometric distortions automatically from single images. A book boundary model is extracted, and a 3D book surface is reconstructed. And then the horizontal and vertical metrics of each column are restored from it. Experimental results show the good dewarping and speed performance. Since no additional equipments and no restrictions to specific book layouts or contents are needed, the proposed system is very practical in real applications.",
"fno": "06628653",
"keywords": [
"Three Dimensional Displays",
"Surface Reconstruction",
"Solid Modeling",
"Image Reconstruction",
"Image Segmentation",
"Measurement",
"Imaging",
"3 D Surface Reconstruction",
"Document Image Rectification",
"Image Dewarping",
"Document Image Restoration"
],
"authors": [
{
"affiliation": null,
"fullName": "Yuan He",
"givenName": "Yuan",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Pan Pan",
"givenName": "Pan",
"surname": "Pan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shufu Xie",
"givenName": "Shufu",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jun Sun",
"givenName": "Jun",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Satoshi Naoi",
"givenName": "Satoshi",
"surname": "Naoi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-08-01T00:00:00",
"pubType": "proceedings",
"pages": "403-407",
"year": "2013",
"issn": "1520-5363",
"isbn": "978-0-7695-4999-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06628652",
"articleId": "12OmNrNh0sn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06628654",
"articleId": "12OmNxzuMKA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdar/2009/3725/0/3725a956",
"title": "A Methodology for Document Image Dewarping Techniques Performance Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2009/3725a956/12OmNBCqbzK",
"parentPublication": {
"id": "proceedings/icdar/2009/3725/0",
"title": "2009 10th International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881429",
"title": "Uniform vs Full Height Maps Using a Time of Flight Device for Dewarping Book Spread Images in the Design of an Automated Book Reader",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881429/12OmNButq2B",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1990/2057/0/00139539",
"title": "Surface shape reconstruction of an undulating transparent object",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1990/00139539/12OmNvsm6vi",
"parentPublication": {
"id": "proceedings/iccv/1990/2057/0",
"title": "Proceedings Third International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/das/2008/3337/0/3337a209",
"title": "A Two-Step Dewarping of Camera Document Images",
"doi": null,
"abstractUrl": "/proceedings-article/das/2008/3337a209/12OmNx76TGQ",
"parentPublication": {
"id": "proceedings/das/2008/3337/0",
"title": "2008 The Eighth IAPR International Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdip/2009/3565/0/3565a190",
"title": "Model-Based Book Dewarping Method for Content-Independent Document Images",
"doi": null,
"abstractUrl": "/proceedings-article/icdip/2009/3565a190/12OmNyuy9No",
"parentPublication": {
"id": "proceedings/icdip/2009/3565/0",
"title": "Digital Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2006/02/i0195",
"title": "Restoring Warped Document Images through 3D Shape Modeling",
"doi": null,
"abstractUrl": "/journal/tp/2006/02/i0195/13rRUwbaqMA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06767138",
"title": "Detection and Reconstruction of an Implicit Boundary Surface by Adaptively Expanding A Small Surface Patch in a 3D Image",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06767138/13rRUxBJhFy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600e563",
"title": "Fourier Document Restoration for Robust Document Dewarping and Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600e563/1H1mJg25mCY",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09992129",
"title": "Line-Based 3D Building Abstraction and Polygonal Surface Reconstruction From Images",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09992129/1JevCrH10vS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdarw/2019/5054/4/505404a001",
"title": "A Fast Page Outline Detection and Dewarping Method Based on Iterative Cut and Adaptive Coordinate Transform",
"doi": null,
"abstractUrl": "/proceedings-article/icdarw/2019/505404a001/1eLye6XGJd6",
"parentPublication": {
"id": "icdarw/2019/5054/4",
"title": "2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45XacGjq",
"doi": "10.1109/CVPR.2018.00321",
"title": "Learning Patch Reconstructability for Accelerating Multi-view Stereo",
"normalizedTitle": "Learning Patch Reconstructability for Accelerating Multi-view Stereo",
"abstract": "We present an approach to accelerate multi-view stereo (MVS) by prioritizing computation on image patches that are likely to produce accurate 3D surface reconstructions. Our key insight is that the accuracy of the surface reconstruction from a given image patch can be predicted significantly faster than performing the actual stereo matching. The intuition is that non-specular, fronto-parallel, in-focus patches are more likely to produce accurate surface reconstructions than highly specular, slanted, blurry patches - and that these properties can be reliably predicted from the image itself. By prioritizing stereo matching on a subset of patches that are highly reconstructable and also cover the 3D surface, we are able to accelerate MVS with minimal reduction in accuracy and completeness. To predict the reconstructability score of an image patch from a single view, we train an image-to-reconstructability neural network: the I2RNet. This reconstructability score enables us to efficiently identify image patches that are likely to provide the most accurate surface estimates before performing stereo matching. We demonstrate that the I2RNet, when trained on the ScanNet dataset, generalizes to the DTU and Tanks & Temples MVS datasets. By using our I2RNet with an existing MVS implementation, we show that our method can achieve more than a 30× speed-up over the baseline with only an minimal loss in completeness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an approach to accelerate multi-view stereo (MVS) by prioritizing computation on image patches that are likely to produce accurate 3D surface reconstructions. Our key insight is that the accuracy of the surface reconstruction from a given image patch can be predicted significantly faster than performing the actual stereo matching. The intuition is that non-specular, fronto-parallel, in-focus patches are more likely to produce accurate surface reconstructions than highly specular, slanted, blurry patches - and that these properties can be reliably predicted from the image itself. By prioritizing stereo matching on a subset of patches that are highly reconstructable and also cover the 3D surface, we are able to accelerate MVS with minimal reduction in accuracy and completeness. To predict the reconstructability score of an image patch from a single view, we train an image-to-reconstructability neural network: the I2RNet. This reconstructability score enables us to efficiently identify image patches that are likely to provide the most accurate surface estimates before performing stereo matching. We demonstrate that the I2RNet, when trained on the ScanNet dataset, generalizes to the DTU and Tanks & Temples MVS datasets. By using our I2RNet with an existing MVS implementation, we show that our method can achieve more than a 30× speed-up over the baseline with only an minimal loss in completeness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an approach to accelerate multi-view stereo (MVS) by prioritizing computation on image patches that are likely to produce accurate 3D surface reconstructions. Our key insight is that the accuracy of the surface reconstruction from a given image patch can be predicted significantly faster than performing the actual stereo matching. The intuition is that non-specular, fronto-parallel, in-focus patches are more likely to produce accurate surface reconstructions than highly specular, slanted, blurry patches - and that these properties can be reliably predicted from the image itself. By prioritizing stereo matching on a subset of patches that are highly reconstructable and also cover the 3D surface, we are able to accelerate MVS with minimal reduction in accuracy and completeness. To predict the reconstructability score of an image patch from a single view, we train an image-to-reconstructability neural network: the I2RNet. This reconstructability score enables us to efficiently identify image patches that are likely to provide the most accurate surface estimates before performing stereo matching. We demonstrate that the I2RNet, when trained on the ScanNet dataset, generalizes to the DTU and Tanks & Temples MVS datasets. By using our I2RNet with an existing MVS implementation, we show that our method can achieve more than a 30× speed-up over the baseline with only an minimal loss in completeness.",
"fno": "642000d041",
"keywords": [
"Image Matching",
"Image Reconstruction",
"Neural Nets",
"Stereo Image Processing",
"I 2 R Net",
"MVS",
"Image To Reconstructability Neural Network",
"Multi View Stereo",
"3 D Surface Reconstructions",
"Stereo Matching",
"Learning Patch Reconstructability",
"Scan Net Dataset",
"Image Reconstruction",
"Surface Reconstruction",
"Three Dimensional Displays",
"Acceleration",
"Pipelines",
"Surface Treatment",
"Neural Networks"
],
"authors": [
{
"affiliation": null,
"fullName": "Alex Poms",
"givenName": "Alex",
"surname": "Poms",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chenglei Wu",
"givenName": "Chenglei",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shoou-I Yu",
"givenName": "Shoou-I",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yaser Sheikh",
"givenName": "Yaser",
"surname": "Sheikh",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "3041-3050",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000d032",
"articleId": "17D45WXIkyK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000d051",
"articleId": "17D45WODaoS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2009/3992/0/05206712",
"title": "Continuous depth estimation for multi-view stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206712/12OmNBkP3zD",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459384",
"title": "Complete multi-view reconstruction of dynamic scenes from probabilistic fusion of narrow and wide baseline stereo",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459384/12OmNwCJOQY",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/06909906",
"title": "Occluding Contours for Multi-view Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/06909906/12OmNxG1yOo",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851f479",
"title": "Just Look at the Image: Viewpoint-Specific Surface Normal Prediction for Improved Multi-View Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851f479/12OmNzvQI3W",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500d967",
"title": "Neural Radiance Fields Approach to Deep Multi-View Photometric Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500d967/1B12MeL2yhW",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g128",
"title": "Rational Polynomial Camera Model Warping for Deep Learning Based Satellite Multi-View Stereo Matching",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g128/1BmEqZNJ5mg",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2591",
"title": "Uncertainty-Aware Deep Multi-View Photometric Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2591/1H1lGLw9BaE",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d036",
"title": "nLMVS-Net: Deep Non-Lambertian Multi-View Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d036/1KxVtHKl5Cw",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093583",
"title": "NRMVS: Non-Rigid Multi-View Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093583/1jPbfZI3SVi",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c036",
"title": "Mesh-Guided Multi-View Stereo With Pyramid Architecture",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c036/1m3nZpSzuaQ",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1i5mkDyiIUg",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1i5mzcJ4CTC",
"doi": "10.1109/ICCVW.2019.00498",
"title": "Patch-Based Reconstruction of a Textureless Deformable 3D Surface from a Single RGB Image",
"normalizedTitle": "Patch-Based Reconstruction of a Textureless Deformable 3D Surface from a Single RGB Image",
"abstract": "We propose a deep learning method for reconstructing a textureless deformable 3D surface from a single RGB image, under various lighting conditions. One of the challenges when training a neural network to predict the shape of a deformable object is that the object exhibits such a great deal of shape variation that it is essentially impractical to have a training set consisting of all possible deformations the object may realize. However, different areas of the deformable object may exhibit similar types of deformations, e.g. similar wrinkles might appear in different areas on the surface of a cloth. Motivated by this, we propose learning local models of shape variation from image patches that we then combine into a global reconstruction of the observed object. Initially, we divide the input image into overlapping patches and a zero-mean depth map as well as a normal map are estimated for each patch using deep learning. Stitching of depth maps is performed by finding the optimal translation of each patch depth map along the viewing direction of the camera and averaging the depth predictions of neighboring patches at their overlapping areas. Stitching of normal maps is performed by normalizing and averaging the normals predictions of neighboring patches at their overlapping areas. Finally, bilateral filtering is performed on the stitched depth and normal maps in order to perform fine-scale smoothing at the regions around patch boundaries. We show increased accuracy compared to previous work even in the presence of limited training data and more effective generalization to unseen objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a deep learning method for reconstructing a textureless deformable 3D surface from a single RGB image, under various lighting conditions. One of the challenges when training a neural network to predict the shape of a deformable object is that the object exhibits such a great deal of shape variation that it is essentially impractical to have a training set consisting of all possible deformations the object may realize. However, different areas of the deformable object may exhibit similar types of deformations, e.g. similar wrinkles might appear in different areas on the surface of a cloth. Motivated by this, we propose learning local models of shape variation from image patches that we then combine into a global reconstruction of the observed object. Initially, we divide the input image into overlapping patches and a zero-mean depth map as well as a normal map are estimated for each patch using deep learning. Stitching of depth maps is performed by finding the optimal translation of each patch depth map along the viewing direction of the camera and averaging the depth predictions of neighboring patches at their overlapping areas. Stitching of normal maps is performed by normalizing and averaging the normals predictions of neighboring patches at their overlapping areas. Finally, bilateral filtering is performed on the stitched depth and normal maps in order to perform fine-scale smoothing at the regions around patch boundaries. We show increased accuracy compared to previous work even in the presence of limited training data and more effective generalization to unseen objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a deep learning method for reconstructing a textureless deformable 3D surface from a single RGB image, under various lighting conditions. One of the challenges when training a neural network to predict the shape of a deformable object is that the object exhibits such a great deal of shape variation that it is essentially impractical to have a training set consisting of all possible deformations the object may realize. However, different areas of the deformable object may exhibit similar types of deformations, e.g. similar wrinkles might appear in different areas on the surface of a cloth. Motivated by this, we propose learning local models of shape variation from image patches that we then combine into a global reconstruction of the observed object. Initially, we divide the input image into overlapping patches and a zero-mean depth map as well as a normal map are estimated for each patch using deep learning. Stitching of depth maps is performed by finding the optimal translation of each patch depth map along the viewing direction of the camera and averaging the depth predictions of neighboring patches at their overlapping areas. Stitching of normal maps is performed by normalizing and averaging the normals predictions of neighboring patches at their overlapping areas. Finally, bilateral filtering is performed on the stitched depth and normal maps in order to perform fine-scale smoothing at the regions around patch boundaries. We show increased accuracy compared to previous work even in the presence of limited training data and more effective generalization to unseen objects.",
"fno": "09022546",
"keywords": [
"Image Colour Analysis",
"Image Filtering",
"Image Reconstruction",
"Image Texture",
"Learning Artificial Intelligence",
"Input Image",
"Observed Object",
"Global Reconstruction",
"Image Patches",
"Similar Wrinkles",
"Similar Types",
"Possible Deformations",
"Shape Variation",
"Deformable Object",
"Lighting Conditions",
"Deep Learning Method",
"Single RGB Image",
"Textureless Deformable 3 D",
"Patch Based Reconstruction",
"Unseen Objects",
"Patch Boundaries",
"Stitched Depth",
"Overlapping Areas",
"Depth Predictions",
"Patch Depth Map",
"Normal Map",
"Zero Mean Depth Map",
"Three Dimensional Displays",
"Image Reconstruction",
"Shape",
"Strain",
"Deformable Models",
"Surface Reconstruction",
"Machine Learning",
"3 D From A Single Image",
"Deep Learning",
"Deformable Object Depth Estimation"
],
"authors": [
{
"affiliation": "Foundation for Research and Technology - Hellas",
"fullName": "Aggeliki Tsoli",
"givenName": "Aggeliki",
"surname": "Tsoli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Foundation for Research and Technology - Hellas",
"fullName": "Antonis. A. Argyros",
"givenName": "Antonis. A.",
"surname": "Argyros",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "4034-4043",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-5023-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09022017",
"articleId": "1i5mMluVUje",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09022322",
"articleId": "1i5myiFA2hG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2017/2610/0/261001a038",
"title": "Dynamic High Resolution Deformable Articulated Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a038/12OmNqFa5nJ",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459403",
"title": "Template-free monocular reconstruction of deformable surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459403/12OmNqJHFsI",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118c917",
"title": "Single Image Super-resolution Using Deformable Patches",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c917/12OmNyfdOSd",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a333",
"title": "Tracking Deformable Surfaces That Undergo Topological Changes Using an RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a333/12OmNyugyRN",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/est/2013/5077/0/5077a079",
"title": "Tissue Surface Model Mapping onto Arbitrary Target Surface Based on Self-Organizing Deformable Model",
"doi": null,
"abstractUrl": "/proceedings-article/est/2013/5077a079/12OmNzUgcYT",
"parentPublication": {
"id": "proceedings/est/2013/5077/0",
"title": "2013 Fourth International Conference on Emerging Security Technologies (EST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1992/05/i0572",
"title": "Surface Reconstruction Using Deformable Models with Interior and Boundary Constraints",
"doi": null,
"abstractUrl": "/journal/tp/1992/05/i0572/13rRUx0xPnR",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a042",
"title": "Patch-Based Non-rigid 3D Reconstruction from a Single Depth Stream",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a042/17D45WGGoME",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300a901",
"title": "Deformable Surface Tracking by Graph Matching",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300a901/1hQqlPMirWU",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e685",
"title": "Correspondence-Free Material Reconstruction using Sparse Surface Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e685/1m3ngsvU9zO",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412352",
"title": "Learning non-rigid surface reconstruction from spatia-temporal image patches",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412352/1tmipvpRjqw",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCcbEdk",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"acronym": "visapp",
"groupId": "1806906",
"volume": "1",
"displayVolume": "1",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqIQSaP",
"doi": "",
"title": "Shape similarity based surface registration",
"normalizedTitle": "Shape similarity based surface registration",
"abstract": "In the last 20 years many approaches for the registration and localization of surfaces were developed. Most of them generate solutions by minimizing point distances or maximizing contact areas between surface points. Other algorithms try to detect corresponding points on the two surfaces by searching for points with same features and align them. However, aligning and localizing self-similar surfaces or surfaces having large regions with approximately constant curvature is still a complex problem.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the last 20 years many approaches for the registration and localization of surfaces were developed. Most of them generate solutions by minimizing point distances or maximizing contact areas between surface points. Other algorithms try to detect corresponding points on the two surfaces by searching for points with same features and align them. However, aligning and localizing self-similar surfaces or surfaces having large regions with approximately constant curvature is still a complex problem.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the last 20 years many approaches for the registration and localization of surfaces were developed. Most of them generate solutions by minimizing point distances or maximizing contact areas between surface points. Other algorithms try to detect corresponding points on the two surfaces by searching for points with same features and align them. However, aligning and localizing self-similar surfaces or surfaces having large regions with approximately constant curvature is still a complex problem.",
"fno": "07294830",
"keywords": [
"Three Dimensional Displays",
"Surface Treatment",
"Surface Cracks",
"Surface Reconstruction",
"Runtime",
"Iterative Closest Point Algorithm",
"Robustness",
"3 D Puzzle",
"Surface Registration",
"Scan Alignment",
"Self Similarity",
"Surface Based Feature",
"RANSAC",
"RANSAM"
],
"authors": [
{
"affiliation": "Institute for Robotics and Process Control, TU Braunschweig, Muehlenpfordtstr. 23, Germany",
"fullName": "Manuel Frei",
"givenName": "Manuel",
"surname": "Frei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute for Robotics and Process Control, TU Braunschweig, Muehlenpfordtstr. 23, Germany",
"fullName": "Simon Winkelbach",
"givenName": "Simon",
"surname": "Winkelbach",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "visapp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-01-01T00:00:00",
"pubType": "proceedings",
"pages": "359-366",
"year": "2014",
"issn": null,
"isbn": "978-9-8975-8133-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07294829",
"articleId": "12OmNAoDi8G",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07294831",
"articleId": "12OmNwKGAn5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032a938",
"title": "Surface Registration via Foliation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a938/12OmNB0X8tQ",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2015/8562/0/07439538",
"title": "Evaluation of pattern based point clouds for patient registration — A phantom study",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2015/07439538/12OmNrJROX1",
"parentPublication": {
"id": "proceedings/iciibms/2015/8562/0",
"title": "2015 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a649",
"title": "Point Cloud Registration with Virtual Interest Points from Implicit Quadric Surface Intersections",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a649/12OmNrNh0Qj",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2013/4893/0/06456227",
"title": "Estimating Discrete Surface Curvature Based on Voronoi Poles",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2013/06456227/12OmNvT2oWn",
"parentPublication": {
"id": "proceedings/isdea/2013/4893/0",
"title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/1/07294861",
"title": "Non-rigid surface registration using cover tree based clustering and nearest neighbor search",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07294861/12OmNwDj10u",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/1",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2014/7981/0/7981a155",
"title": "Identification and Marking of Molecular Surface Feature Regions Based on Spherical Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981a155/12OmNwnYFZX",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06814973",
"title": "Polar Embedded Catmull-Clark Subdivision Surface",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06814973/12OmNwtEEOJ",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2011/4369/0/4369a290",
"title": "Sampling Relevant Points for Surface Registration",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2011/4369a290/12OmNzSQdsh",
"parentPublication": {
"id": "proceedings/3dimpvt/2011/4369/0",
"title": "2011 International Conference on 3D Imaging, Modeling, Processing, Visualization and Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/1997/7943/0/79430121",
"title": "Surface Registration by Matching Oriented Points",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/1997/79430121/12OmNzdoN7H",
"parentPublication": {
"id": "proceedings/3dim/1997/7943/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv-2/2019/2850/0/285000a160",
"title": "Hybrid Polygon-Point Rendering of Singular and Non-Manifold Implicit Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/iv-2/2019/285000a160/1cMEQnNfRXG",
"parentPublication": {
"id": "proceedings/iv-2/2019/2850/0",
"title": "2019 23rd International Conference in Information Visualization – Part II",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwCsdKG",
"doi": "10.1109/ICCV.2017.255",
"title": "3D Surface Detail Enhancement from a Single Normal Map",
"normalizedTitle": "3D Surface Detail Enhancement from a Single Normal Map",
"abstract": "In 3D reconstruction, the obtained surface details are mainly limited to the visual sensor due to sampling and quantization in the digitalization process. How to get a fine-grained 3D surface with low-cost is still a challenging obstacle in terms of experience, equipment and easyto-obtain. This work introduces a novel framework for enhancing surfaces reconstructed from normal map, where the assumptions on hardware (e.g., photometric stereo setup) and reflection model (e.g., Lambertion reflection) are not necessarily needed. We propose to use a new measure, angle profile, to infer the hidden micro-structure from existing surfaces. In addition, the inferred results are further improved in the domain of discrete geometry processing (DGP) which is able to achieve a stable surface structure under a selectable enhancement setting. Extensive simulation results show that the proposed method obtains significantly improvements over uniform sharpening method in terms of both subjective visual assessment and objective quality metric.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In 3D reconstruction, the obtained surface details are mainly limited to the visual sensor due to sampling and quantization in the digitalization process. How to get a fine-grained 3D surface with low-cost is still a challenging obstacle in terms of experience, equipment and easyto-obtain. This work introduces a novel framework for enhancing surfaces reconstructed from normal map, where the assumptions on hardware (e.g., photometric stereo setup) and reflection model (e.g., Lambertion reflection) are not necessarily needed. We propose to use a new measure, angle profile, to infer the hidden micro-structure from existing surfaces. In addition, the inferred results are further improved in the domain of discrete geometry processing (DGP) which is able to achieve a stable surface structure under a selectable enhancement setting. Extensive simulation results show that the proposed method obtains significantly improvements over uniform sharpening method in terms of both subjective visual assessment and objective quality metric.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In 3D reconstruction, the obtained surface details are mainly limited to the visual sensor due to sampling and quantization in the digitalization process. How to get a fine-grained 3D surface with low-cost is still a challenging obstacle in terms of experience, equipment and easyto-obtain. This work introduces a novel framework for enhancing surfaces reconstructed from normal map, where the assumptions on hardware (e.g., photometric stereo setup) and reflection model (e.g., Lambertion reflection) are not necessarily needed. We propose to use a new measure, angle profile, to infer the hidden micro-structure from existing surfaces. In addition, the inferred results are further improved in the domain of discrete geometry processing (DGP) which is able to achieve a stable surface structure under a selectable enhancement setting. Extensive simulation results show that the proposed method obtains significantly improvements over uniform sharpening method in terms of both subjective visual assessment and objective quality metric.",
"fno": "1032c344",
"keywords": [
"Geometry",
"Image Enhancement",
"Image Reconstruction",
"Objective Quality Metric",
"Angle Profile",
"Surface Reconstruction",
"3 D Reconstruction",
"Surface Structure",
"Discrete Geometry Processing",
"Normal Map",
"Fine Grained 3 D Surface",
"Digitalization Process",
"Quantization",
"Visual Sensor",
"3 D Surface Detail Enhancement",
"Subjective Visual Assessment",
"Three Dimensional Displays",
"Surface Reconstruction",
"Surface Treatment",
"Surface Structures",
"Surface Texture",
"Shape",
"Lighting"
],
"authors": [
{
"affiliation": null,
"fullName": "Wuyuan Xie",
"givenName": "Wuyuan",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Miaohui Wang",
"givenName": "Miaohui",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xianbiao Qi",
"givenName": "Xianbiao",
"surname": "Qi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lei Zhang",
"givenName": "Lei",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "2344-2352",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032c335",
"articleId": "12OmNAL3B8G",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032c353",
"articleId": "12OmNyPQ4BP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2017/0733/0/0733b735",
"title": "Surface Normal Reconstruction from Specular Information in Light Field Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733b735/12OmNAP1YZr",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402544",
"title": "Real-time surface light-field capture for augmentation of planar specular surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402544/12OmNASILPn",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420876",
"title": "Recovering object surfaces from viewed changes in surface texture patterns",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420876/12OmNvT2p2H",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643566",
"title": "Build your world and play in it: Interacting with surface particles on complex objects",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643566/12OmNzd7byj",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/05/06605689",
"title": "Geodesic Mapping for Dynamic Surface Alignment",
"doi": null,
"abstractUrl": "/journal/tp/2014/05/06605689/13rRUNvPLaQ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1986/11/mcg1986110021",
"title": "Environment Mapping and Other Applications of World Projections",
"doi": null,
"abstractUrl": "/magazine/cg/1986/11/mcg1986110021/13rRUNvgzc6",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/02/07858760",
"title": "Shading-Based Surface Detail Recovery Under General Unknown Illumination",
"doi": null,
"abstractUrl": "/journal/tp/2018/02/07858760/13rRUwdIOW8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/05/ttg2010050763",
"title": "Sample-Based Surface Coloring",
"doi": null,
"abstractUrl": "/journal/tg/2010/05/ttg2010050763/13rRUxC0SW5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/02/07358132",
"title": "On Frictional Forces between the Finger and a Textured Surface during Active Touch",
"doi": null,
"abstractUrl": "/journal/th/2016/02/07358132/13rRUxZzAhO",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a356",
"title": "3D Surface Detail Enhancement",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a356/1vg8o7U0NDG",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy2agRS",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"acronym": "cad-graphics",
"groupId": "1001488",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxXCGKC",
"doi": "10.1109/CADGraphics.2013.76",
"title": "Illusory Motions on Surfaces",
"normalizedTitle": "Illusory Motions on Surfaces",
"abstract": "Illusory motions refer to the phenomena in which static images composed of certain colors and patterns lead to the illusion of motions. This paper presents a first approach to generating illusory motions on 3D surfaces which can be used for shape illustration as well as artistic visualization of line fields on surfaces. Our method extends previous work on generating illusory motions in the plane, which we adapt to 3D surfaces. In addition, we propose novel Repeated Asymmetric Patterns (RAPs) to visualize bidirectional flows, thus enabling the visualization of line fields in the plane and on surfaces. We demonstrate the effectiveness of our method with applications in shape illustration as well as line field visualization on surfaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Illusory motions refer to the phenomena in which static images composed of certain colors and patterns lead to the illusion of motions. This paper presents a first approach to generating illusory motions on 3D surfaces which can be used for shape illustration as well as artistic visualization of line fields on surfaces. Our method extends previous work on generating illusory motions in the plane, which we adapt to 3D surfaces. In addition, we propose novel Repeated Asymmetric Patterns (RAPs) to visualize bidirectional flows, thus enabling the visualization of line fields in the plane and on surfaces. We demonstrate the effectiveness of our method with applications in shape illustration as well as line field visualization on surfaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Illusory motions refer to the phenomena in which static images composed of certain colors and patterns lead to the illusion of motions. This paper presents a first approach to generating illusory motions on 3D surfaces which can be used for shape illustration as well as artistic visualization of line fields on surfaces. Our method extends previous work on generating illusory motions in the plane, which we adapt to 3D surfaces. In addition, we propose novel Repeated Asymmetric Patterns (RAPs) to visualize bidirectional flows, thus enabling the visualization of line fields in the plane and on surfaces. We demonstrate the effectiveness of our method with applications in shape illustration as well as line field visualization on surfaces.",
"fno": "06815040",
"keywords": [
"Visualization",
"Vectors",
"Three Dimensional Displays",
"Shape",
"Surface Treatment",
"Tensile Stress",
"Line Field",
"Illusory Motion",
"Repeated Asymmetric Patterns"
],
"authors": [
{
"affiliation": null,
"fullName": "Ming-Te Chi",
"givenName": "Ming-Te",
"surname": "Chi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chih-Yuan Yao",
"givenName": "Chih-Yuan",
"surname": "Yao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tong-Yee Lee",
"givenName": "Tong-Yee",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Eugene Zhang",
"givenName": "Eugene",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cad-graphics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-11-01T00:00:00",
"pubType": "proceedings",
"pages": "419-420",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2576-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06815039",
"articleId": "12OmNyNQSFu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06815041",
"articleId": "12OmNwdtwl3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2004/2158/2/01315226",
"title": "Reconstructing open surfaces from unorganized data points",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315226/12OmNBpVQ2L",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/2",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206517",
"title": "On edge detection on surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206517/12OmNyUFfIy",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156371",
"title": "Adaptive particle relaxation for time surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156371/12OmNzJbR3z",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2014/5500/0/5500b008",
"title": "Scalable Computation of Stream Surfaces on Large Scale Vector Fields",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2014/5500b008/12OmNzXFoId",
"parentPublication": {
"id": "proceedings/sc/2014/5500/0",
"title": "SC14: International Conference for High Performance Computing, Networking, Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539320",
"title": "Decal-Maps: Real-Time Layering of Decals on Surfaces for Multivariate Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539320/13rRUx0gezV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050838",
"title": "Smart Transparency for Illustrative Visualization of Complex Flow Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050838/13rRUxC0SvU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017576",
"title": "Interactive Design and Visualization of Branched Covering Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017576/13rRUzphDy0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08447439",
"title": "Tensor Field Visualization using Fiber Surfaces of Invariant Space",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08447439/17D45W1Oa1M",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/02/08453863",
"title": "Feature Level-Sets: Generalizing Iso-Surfaces to Multi-Variate Data",
"doi": null,
"abstractUrl": "/journal/tg/2020/02/08453863/1gd8NarXyzC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09224154",
"title": "Mode Surfaces of Symmetric Tensor Fields: Topological Analysis and Seamless Extraction",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09224154/1nV63QG11le",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNywfKyu",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzd7byj",
"doi": "10.1109/ISMAR.2010.5643566",
"title": "Build your world and play in it: Interacting with surface particles on complex objects",
"normalizedTitle": "Build your world and play in it: Interacting with surface particles on complex objects",
"abstract": "We explore interacting with everyday objects by representing content as interactive surface particles. Users can build their own physical world, map virtual content onto their physical construction and play directly with the surface using a stylus. A surface particle representation allows programmed content to be created independent of the display object and to be reused on many surfaces. We demonstrated this idea through a projector-camera system that acquires the object geometry and enables direct interaction through an IR tracked stylus. We present three motivating example applications, each displayed on three example surfaces. We discuss a set of interaction techniques that show possible avenues for structuring interaction on complicated everyday objects, such as Surface Adaptive GUIs for menu selection. Through a preliminary informal evaluation and interviews with end users, we demonstrate the potential of interacting with surface particles and identify improvements necessary to make this interaction practical on everyday surfaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We explore interacting with everyday objects by representing content as interactive surface particles. Users can build their own physical world, map virtual content onto their physical construction and play directly with the surface using a stylus. A surface particle representation allows programmed content to be created independent of the display object and to be reused on many surfaces. We demonstrated this idea through a projector-camera system that acquires the object geometry and enables direct interaction through an IR tracked stylus. We present three motivating example applications, each displayed on three example surfaces. We discuss a set of interaction techniques that show possible avenues for structuring interaction on complicated everyday objects, such as Surface Adaptive GUIs for menu selection. Through a preliminary informal evaluation and interviews with end users, we demonstrate the potential of interacting with surface particles and identify improvements necessary to make this interaction practical on everyday surfaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We explore interacting with everyday objects by representing content as interactive surface particles. Users can build their own physical world, map virtual content onto their physical construction and play directly with the surface using a stylus. A surface particle representation allows programmed content to be created independent of the display object and to be reused on many surfaces. We demonstrated this idea through a projector-camera system that acquires the object geometry and enables direct interaction through an IR tracked stylus. We present three motivating example applications, each displayed on three example surfaces. We discuss a set of interaction techniques that show possible avenues for structuring interaction on complicated everyday objects, such as Surface Adaptive GUIs for menu selection. Through a preliminary informal evaluation and interviews with end users, we demonstrate the potential of interacting with surface particles and identify improvements necessary to make this interaction practical on everyday surfaces.",
"fno": "05643566",
"keywords": [
"Computational Geometry",
"Graphical User Interfaces",
"Solid Modelling",
"Virtual Reality",
"Complex Object",
"Interactive Surface Particle",
"Physical World",
"Virtual Content",
"Projector Camera System",
"Object Geometry",
"IR Tracked Stylus",
"GUI",
"Sprites Computer",
"Three Dimensional Displays",
"Surface Treatment",
"Games",
"Surface Texture",
"Face",
"Visualization",
"H 5 2 Information Interfaces And Presentation User Interfaces Input Devices And Strategies Interaction Styles",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Department of Computer Science, University of Illinois at Urbana-Champaign, 201 N Goodwin Ave, 61801 USA",
"fullName": "Brett R. Jones",
"givenName": "Brett R.",
"surname": "Jones",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University of Illinois at Urbana-Champaign, 201 N Goodwin Ave, 61801 USA",
"fullName": "Rajinder Sodhi",
"givenName": "Rajinder",
"surname": "Sodhi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University of Illinois at Urbana-Champaign, 201 N Goodwin Ave, 61801 USA",
"fullName": "Roy H. Campbell",
"givenName": "Roy H.",
"surname": "Campbell",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University of Illinois at Urbana-Champaign, 201 N Goodwin Ave, 61801 USA",
"fullName": "Guy Garnett",
"givenName": "Guy",
"surname": "Garnett",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University of Illinois at Urbana-Champaign, 201 N Goodwin Ave, 61801 USA",
"fullName": "Brian P. Bailey",
"givenName": "Brian P.",
"surname": "Bailey",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-10-01T00:00:00",
"pubType": "proceedings",
"pages": "165-174",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-9343-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05643564",
"articleId": "12OmNvDqsBr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05643567",
"articleId": "12OmNz6iOOo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/1995/7042/0/70420876",
"title": "Recovering object surfaces from viewed changes in surface texture patterns",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420876/12OmNvT2p2H",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032c344",
"title": "3D Surface Detail Enhancement from a Single Normal Map",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c344/12OmNwCsdKG",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2013/5051/0/5051a068",
"title": "Growing Grid-Evolutionary Algorithm for Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2013/5051a068/12OmNwogh3R",
"parentPublication": {
"id": "proceedings/cgiv/2013/5051/0",
"title": "2013 10th International Conference Computer Graphics, Imaging and Visualization (CGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipsn/2017/4890/0/07944790",
"title": "SurfaceVibe: Vibration-Based Tap & Swipe Tracking on Ubiquitous Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/ipsn/2017/07944790/12OmNykTNom",
"parentPublication": {
"id": "proceedings/ipsn/2017/4890/0",
"title": "2017 16th ACM/IEEE International Conference on Information Processing in Sensor Networks (IPSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671806",
"title": "Psychophysical exploration of stereoscopic pseudo-transparency",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671806/12OmNyz5JSD",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/05/06605689",
"title": "Geodesic Mapping for Dynamic Surface Alignment",
"doi": null,
"abstractUrl": "/journal/tp/2014/05/06605689/13rRUNvPLaQ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/02/07737070",
"title": "Multimodal Feature-Based Surface Material Classification",
"doi": null,
"abstractUrl": "/journal/th/2017/02/07737070/13rRUNvyakZ",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539320",
"title": "Decal-Maps: Real-Time Layering of Decals on Surfaces for Multivariate Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539320/13rRUx0gezV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/02/07358132",
"title": "On Frictional Forces between the Finger and a Textured Surface during Active Touch",
"doi": null,
"abstractUrl": "/journal/th/2016/02/07358132/13rRUxZzAhO",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2013/02/tth2013020181",
"title": "Discrimination of Real and Virtual Surfaces with Sinusoidal and Triangular Gratings Using the Fingertip and Stylus",
"doi": null,
"abstractUrl": "/journal/th/2013/02/tth2013020181/13rRUzphDy7",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy9Prj1",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCzb9xl",
"doi": "10.1109/ICCVW.2017.332",
"title": "Towards Automated Recognition of Facial Expressions in Animal Models",
"normalizedTitle": "Towards Automated Recognition of Facial Expressions in Animal Models",
"abstract": "Facial expressions play a significant role in the expression of emotional states, such as fear, surprise, and happiness in humans and other animals. The current systems for recognizing animal facial expression model in Non-human primates (NHPs) are currently limited to manual decoding of the facial muscles and observations, which is biased, time-consuming and requires a long training process and certification. The main objective of this work is to establish a computational framework for facial recognition systems for automatic recognition NHP facial expressions from standard video recordings with minimal assumptions. The suggested technology consists of: 1)a tailored facial image registration for NHPs; 2)a two-layers unsupervised clustering algorithm that forms an ordered dictionary of facial images for different facial segments; 3)extract dynamical temporal-spectral features;, and recognize dynamic facial expressions. The feasibility of the methods was verified using video recordings of an NHP under various behavioral conditions, recognizing typical NHP facial expressions in the wild. The results were compared to three human experts, and show an agreement of more than 82%. This work is the first attempt for efficient automatic recognition of facial expressions in NHPs using minimal assumptions about the physiology of facial expressions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Facial expressions play a significant role in the expression of emotional states, such as fear, surprise, and happiness in humans and other animals. The current systems for recognizing animal facial expression model in Non-human primates (NHPs) are currently limited to manual decoding of the facial muscles and observations, which is biased, time-consuming and requires a long training process and certification. The main objective of this work is to establish a computational framework for facial recognition systems for automatic recognition NHP facial expressions from standard video recordings with minimal assumptions. The suggested technology consists of: 1)a tailored facial image registration for NHPs; 2)a two-layers unsupervised clustering algorithm that forms an ordered dictionary of facial images for different facial segments; 3)extract dynamical temporal-spectral features;, and recognize dynamic facial expressions. The feasibility of the methods was verified using video recordings of an NHP under various behavioral conditions, recognizing typical NHP facial expressions in the wild. The results were compared to three human experts, and show an agreement of more than 82%. This work is the first attempt for efficient automatic recognition of facial expressions in NHPs using minimal assumptions about the physiology of facial expressions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Facial expressions play a significant role in the expression of emotional states, such as fear, surprise, and happiness in humans and other animals. The current systems for recognizing animal facial expression model in Non-human primates (NHPs) are currently limited to manual decoding of the facial muscles and observations, which is biased, time-consuming and requires a long training process and certification. The main objective of this work is to establish a computational framework for facial recognition systems for automatic recognition NHP facial expressions from standard video recordings with minimal assumptions. The suggested technology consists of: 1)a tailored facial image registration for NHPs; 2)a two-layers unsupervised clustering algorithm that forms an ordered dictionary of facial images for different facial segments; 3)extract dynamical temporal-spectral features;, and recognize dynamic facial expressions. The feasibility of the methods was verified using video recordings of an NHP under various behavioral conditions, recognizing typical NHP facial expressions in the wild. The results were compared to three human experts, and show an agreement of more than 82%. This work is the first attempt for efficient automatic recognition of facial expressions in NHPs using minimal assumptions about the physiology of facial expressions.",
"fno": "1034c810",
"keywords": [
"Face Recognition",
"Streaming Media",
"Muscles",
"Dictionaries",
"Decoding",
"Computational Modeling",
"Lips"
],
"authors": [
{
"affiliation": null,
"fullName": "Gaddi Blumrosen",
"givenName": "Gaddi",
"surname": "Blumrosen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "David Hawellek",
"givenName": "David",
"surname": "Hawellek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bijan Pesaran",
"givenName": "Bijan",
"surname": "Pesaran",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "2810-2819",
"year": "2017",
"issn": "2473-9944",
"isbn": "978-1-5386-1034-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1034c805",
"articleId": "12OmNyNQSOG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1034c820",
"articleId": "12OmNBpEeJq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2003/7965/1/7965177",
"title": "Recognizing facial expressions using active textures with wrinkles",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2003/7965177/12OmNAS9ztR",
"parentPublication": {
"id": "proceedings/icme/2003/7965/1",
"title": "2003 International Conference on Multimedia and Expo. ICME '03. Proceedings (Cat. No.03TH8698)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2008/2153/0/04813317",
"title": "Emotional contagion for unseen bodily expressions: Evidence from facial EMG",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2008/04813317/12OmNAmmuOV",
"parentPublication": {
"id": "proceedings/fg/2008/2153/0",
"title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6270/2/00576873",
"title": "Recognizing human facial expressions in a potential field",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576873/12OmNAnuTEc",
"parentPublication": {
"id": "proceedings/icpr/1994/6270/2",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437b452",
"title": "Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437b452/12OmNrYlmDm",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2005/9385/0/01577290",
"title": "Recognizing facial expressions at low resolution",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2005/01577290/12OmNvAAtlL",
"parentPublication": {
"id": "proceedings/avss/2005/9385/0",
"title": "IEEE Conference on Advanced Video and Signal Based Surveillance, 2005.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6265/1/00576429",
"title": "Recognizing facial expressions by spatio-temporal analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576429/12OmNyUnEI8",
"parentPublication": {
"id": "proceedings/icpr/1994/6265/1",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2014/03/06778017",
"title": "Design of a Wearable Device for Reading Positive Expressions from Facial EMG Signals",
"doi": null,
"abstractUrl": "/journal/ta/2014/03/06778017/13rRUyY2937",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/culture-and-computing/2017/1135/0/08227348",
"title": "Analyzing Facial Expressions and Hand Gestures in Filipino Students' Programming Sessions",
"doi": null,
"abstractUrl": "/proceedings-article/culture-and-computing/2017/08227348/17D45XDIXVK",
"parentPublication": {
"id": "proceedings/culture-and-computing/2017/1135/0",
"title": "2017 International Conference on Culture and Computing (Culture and Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2019/0089/0/08756541",
"title": "A Boost in Revealing Subtle Facial Expressions: A Consolidated Eulerian Framework",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2019/08756541/1bzYwpdafPa",
"parentPublication": {
"id": "proceedings/fg/2019/0089/0",
"title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a194",
"title": "SAMM Long Videos: A Spontaneous Facial Micro- and Macro-Expressions Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a194/1kecI15Jmk8",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNykCcdi",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrYlmDm",
"doi": "10.1109/CVPRW.2016.182",
"title": "Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions",
"normalizedTitle": "Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions",
"abstract": "Automatic facial expression recognition (FER) is an important component of affect-aware technologies. Because of the lack of labeled spontaneous data, majority of existing automated FER systems were trained on posed facial expressions, however in real-world applications we deal with (subtle) spontaneous facial expression. This paper introduces an extension of DISFA, a previously released and well-accepted face dataset. Extended DISFA (DISFA+) has the following features: 1) it contains a large set of posed and spontaneous facial expressions data for a same group of individuals, 2) it provides the manually labeled framebased annotations of 5-level intensity of twelve FACS facial actions, 3) it provides meta data (i.e. facial landmark points in addition to the self-report of each individual regarding every posed facial expression). This paper introduces and employs DISFA+, to analyze and compare temporal patterns and dynamic characteristics of posed and spontaneous facial expressions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Automatic facial expression recognition (FER) is an important component of affect-aware technologies. Because of the lack of labeled spontaneous data, majority of existing automated FER systems were trained on posed facial expressions, however in real-world applications we deal with (subtle) spontaneous facial expression. This paper introduces an extension of DISFA, a previously released and well-accepted face dataset. Extended DISFA (DISFA+) has the following features: 1) it contains a large set of posed and spontaneous facial expressions data for a same group of individuals, 2) it provides the manually labeled framebased annotations of 5-level intensity of twelve FACS facial actions, 3) it provides meta data (i.e. facial landmark points in addition to the self-report of each individual regarding every posed facial expression). This paper introduces and employs DISFA+, to analyze and compare temporal patterns and dynamic characteristics of posed and spontaneous facial expressions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Automatic facial expression recognition (FER) is an important component of affect-aware technologies. Because of the lack of labeled spontaneous data, majority of existing automated FER systems were trained on posed facial expressions, however in real-world applications we deal with (subtle) spontaneous facial expression. This paper introduces an extension of DISFA, a previously released and well-accepted face dataset. Extended DISFA (DISFA+) has the following features: 1) it contains a large set of posed and spontaneous facial expressions data for a same group of individuals, 2) it provides the manually labeled framebased annotations of 5-level intensity of twelve FACS facial actions, 3) it provides meta data (i.e. facial landmark points in addition to the self-report of each individual regarding every posed facial expression). This paper introduces and employs DISFA+, to analyze and compare temporal patterns and dynamic characteristics of posed and spontaneous facial expressions.",
"fno": "1437b452",
"keywords": [
"Gold",
"Facial Muscles",
"Pain",
"Databases",
"Software",
"Lips"
],
"authors": [
{
"affiliation": null,
"fullName": "Mohammad Mavadati",
"givenName": "Mohammad",
"surname": "Mavadati",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Peyten Sanger",
"givenName": "Peyten",
"surname": "Sanger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mohammad H. Mahoor",
"givenName": "Mohammad H.",
"surname": "Mahoor",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1452-1459",
"year": "2016",
"issn": "2160-7516",
"isbn": "978-1-5090-1437-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1437b443",
"articleId": "12OmNy50gd9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1437b460",
"articleId": "12OmNBqv2bq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2011/0063/0/06130343",
"title": "Differentiating spontaneous from posed facial expressions within a generic facial expression recognition framework",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130343/12OmNAKM01b",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2017/0563/0/08273651",
"title": "Spontaneous and posed smile recognition based on spatial and temporal patterns of facial EMG",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2017/08273651/12OmNBigFqV",
"parentPublication": {
"id": "proceedings/acii/2017/0563/0",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imvip/2009/3796/0/3796a139",
"title": "Estimation of the Temporal Dynamics of Posed and Spontaneous Facial Expression Formation Using LLE",
"doi": null,
"abstractUrl": "/proceedings-article/imvip/2009/3796a139/12OmNrJRPl9",
"parentPublication": {
"id": "proceedings/imvip/2009/3796/0",
"title": "International Machine Vision and Image Processing Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460330",
"title": "Posed and spontaneous expression distinguishment from infrared thermal images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460330/12OmNwIpNjc",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2013/5545/0/06553746",
"title": "Nebula feature: A space-time feature for posed and spontaneous 4D facial behavior analysis",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2013/06553746/12OmNwdtw7o",
"parentPublication": {
"id": "proceedings/fg/2013/5545/0",
"title": "2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344637",
"title": "Posed and spontaneous facial expression differentiation using deep Boltzmann machines",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344637/12OmNxcMSdU",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a079",
"title": "Analyses of the Differences between Posed and Spontaneous Facial Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a079/12OmNzhnadz",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2013/02/tta2013020151",
"title": "DISFA: A Spontaneous Facial Action Intensity Database",
"doi": null,
"abstractUrl": "/journal/ta/2013/02/tta2013020151/13rRUEgarrl",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a194",
"title": "SAMM Long Videos: A Spontaneous Facial Micro- and Macro-Expressions Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a194/1kecI15Jmk8",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2020/7397/0/739700a017",
"title": "Classification of Posed Smiles and Spontaneous Smiles with LSTM",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2020/739700a017/1tGcrHqoiTC",
"parentPublication": {
"id": "proceedings/iiai-aai/2020/7397/0",
"title": "2020 9th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyKa5Tk",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrkjVeg",
"doi": "10.1109/ICME.2008.4607595",
"title": "Muscle-driven modeling of wrinkles for 3D facial expressions",
"normalizedTitle": "Muscle-driven modeling of wrinkles for 3D facial expressions",
"abstract": "Facial expression animation considering wrinkle formation is an aspiring goal and a challenging task. This paper presents a new geometric wrinkle model that is defined according to facial muscle anatomy for efficient simulation of dynamic wrinkles within expressions. Our method is applied to an anatomy-based face model with a multi-layer structure of skin, muscles, and skull. The location and orientation of the wrinkles are automatically determined based on muscle contraction and its influence on the skin. Corresponding to two types of facial muscles, the geometric wrinkle model governs evolution of wrinkle amplitude in the local deformed face regions. It provides intuitive parameters for easy control over wrinkle characteristics by taking into account the properties of real wrinkles. Results show that this method enables realistic wrinkles synchronized with facial movements to be dynamically simulated and rendered at an interactive rate.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Facial expression animation considering wrinkle formation is an aspiring goal and a challenging task. This paper presents a new geometric wrinkle model that is defined according to facial muscle anatomy for efficient simulation of dynamic wrinkles within expressions. Our method is applied to an anatomy-based face model with a multi-layer structure of skin, muscles, and skull. The location and orientation of the wrinkles are automatically determined based on muscle contraction and its influence on the skin. Corresponding to two types of facial muscles, the geometric wrinkle model governs evolution of wrinkle amplitude in the local deformed face regions. It provides intuitive parameters for easy control over wrinkle characteristics by taking into account the properties of real wrinkles. Results show that this method enables realistic wrinkles synchronized with facial movements to be dynamically simulated and rendered at an interactive rate.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Facial expression animation considering wrinkle formation is an aspiring goal and a challenging task. This paper presents a new geometric wrinkle model that is defined according to facial muscle anatomy for efficient simulation of dynamic wrinkles within expressions. Our method is applied to an anatomy-based face model with a multi-layer structure of skin, muscles, and skull. The location and orientation of the wrinkles are automatically determined based on muscle contraction and its influence on the skin. Corresponding to two types of facial muscles, the geometric wrinkle model governs evolution of wrinkle amplitude in the local deformed face regions. It provides intuitive parameters for easy control over wrinkle characteristics by taking into account the properties of real wrinkles. Results show that this method enables realistic wrinkles synchronized with facial movements to be dynamically simulated and rendered at an interactive rate.",
"fno": "04607595",
"keywords": [
"Computer Animation",
"Rendering Computer Graphics",
"Muscle Driven Modeling",
"3 D Facial Expressions",
"Facial Expression Animation",
"Wrinkle Formation",
"Facial Muscle Anatomy",
"Muscle Contraction",
"Muscles",
"Skin",
"Chromium",
"Computational Modeling",
"Shape",
"Face",
"Animation"
],
"authors": [
{
"affiliation": "Institute of High Performance Computing, Singapore",
"fullName": "Yu Zhang",
"givenName": null,
"surname": "Yu Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-06-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1945-7871",
"isbn": "978-1-4244-2570-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04607594",
"articleId": "12OmNzZmZiY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04607596",
"articleId": "12OmNzSyCgs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2003/7965/1/7965177",
"title": "Recognizing facial expressions using active textures with wrinkles",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2003/7965177/12OmNAS9ztR",
"parentPublication": {
"id": "proceedings/icme/2003/7965/1",
"title": "2003 International Conference on Multimedia and Expo. ICME '03. Proceedings (Cat. No.03TH8698)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2007/2929/0/29290874",
"title": "Modeling Expressive Wrinkles of Face For Animation",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2007/29290874/12OmNBV9Iif",
"parentPublication": {
"id": "proceedings/icig/2007/2929/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2002/1784/0/17840166",
"title": "A Simple Method for Modeling Wrinkles on Human Skin",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840166/12OmNBlXs80",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2005/9330/0/01500342",
"title": "Keynote address",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2005/01500342/12OmNvlxJt2",
"parentPublication": {
"id": "proceedings/cgi/2005/9330/0",
"title": "Computer Graphics International 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b651",
"title": "Extraction and Selection of Muscle Based Features for Facial Expression Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b651/12OmNx4Q6Bx",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2013/5545/0/06553719",
"title": "Assessment of facial wrinkles as a soft biometrics",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2013/06553719/12OmNzCF4RU",
"parentPublication": {
"id": "proceedings/fg/2013/5545/0",
"title": "2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1999/0167/0/01670210",
"title": "Skin Aging Estimation by Facial Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1999/01670210/12OmNzRZpUr",
"parentPublication": {
"id": "proceedings/ca/1999/0167/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1996/7588/0/75880090",
"title": "Simulation of Static and Dynamic Wrinkles of Skin",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1996/75880090/12OmNzYeAO0",
"parentPublication": {
"id": "proceedings/ca/1996/7588/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2005/9330/0/01500352",
"title": "Realistic and efficient wrinkle simulation using an anatomy-based face model with adaptive refinement",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2005/01500352/12OmNzvz6DE",
"parentPublication": {
"id": "proceedings/cgi/2005/9330/0",
"title": "Computer Graphics International 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06872553",
"title": "Generating Facial Expressions Using an Anatomically Accurate Biomechanical Model",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06872553/13rRUwI5Uga",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCvLY1R",
"title": "Proceedings of Computer Animation '94",
"acronym": "ca",
"groupId": "1000121",
"volume": "0",
"displayVolume": "0",
"year": "1994",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwK7o9I",
"doi": "10.1109/CA.1994.324007",
"title": "Modeling of vascular expressions in facial animation",
"normalizedTitle": "Modeling of vascular expressions in facial animation",
"abstract": "Most of the earlier and existing computational models for facial animation consider only muscular expressions. We address and emphasize issues related to modeling of vascular expression. The proposed model enables visual characteristics such as skin color to change with time and provide visual clues for emotions like paleness and blushing. An emotion is defined as a function of two signals in time, one for spatial changes (muscular effects) and the other for the color (vascular effects). For different regions of the face, the atomic vascular action is modeled as an image mask with its shape and the shade defined by Bezier functions to manipulate the texture image.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Most of the earlier and existing computational models for facial animation consider only muscular expressions. We address and emphasize issues related to modeling of vascular expression. The proposed model enables visual characteristics such as skin color to change with time and provide visual clues for emotions like paleness and blushing. An emotion is defined as a function of two signals in time, one for spatial changes (muscular effects) and the other for the color (vascular effects). For different regions of the face, the atomic vascular action is modeled as an image mask with its shape and the shade defined by Bezier functions to manipulate the texture image.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Most of the earlier and existing computational models for facial animation consider only muscular expressions. We address and emphasize issues related to modeling of vascular expression. The proposed model enables visual characteristics such as skin color to change with time and provide visual clues for emotions like paleness and blushing. An emotion is defined as a function of two signals in time, one for spatial changes (muscular effects) and the other for the color (vascular effects). For different regions of the face, the atomic vascular action is modeled as an image mask with its shape and the shade defined by Bezier functions to manipulate the texture image.",
"fno": "00324007",
"keywords": [
"Image Texture",
"Computer Animation",
"Colour",
"Computational Geometry",
"Vascular Expressions",
"Facial Animation",
"Computational Models",
"Visual Characteristics",
"Skin Color",
"Visual Clues",
"Emotions",
"Paleness",
"Blushing",
"Spatial Changes",
"Atomic Vascular Action",
"Image Mask",
"Bezier Functions",
"Texture Image",
"Facial Animation",
"Skin",
"Computational Modeling",
"Face Detection",
"Context Modeling",
"Shape",
"Muscles",
"Neuromuscular",
"Face Recognition",
"Broadcasting"
],
"authors": [
{
"affiliation": "MIRAlab, Geneva Univ., Switzerland",
"fullName": "P. Kalra",
"givenName": "P.",
"surname": "Kalra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MIRAlab, Geneva Univ., Switzerland",
"fullName": "N. Magnenat-Thalmann",
"givenName": "N.",
"surname": "Magnenat-Thalmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ca",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1994-01-01T00:00:00",
"pubType": "proceedings",
"pages": "50-58, 201",
"year": "1994",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00324006",
"articleId": "12OmNwErpsG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00324008",
"articleId": "12OmNzcPAMk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2008/2570/0/04607595",
"title": "Muscle-driven modeling of wrinkles for 3D facial expressions",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607595/12OmNrkjVeg",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2014/5179/0/06850763",
"title": "Facial features detection in color images based on skin color segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2014/06850763/12OmNsd6vjP",
"parentPublication": {
"id": "proceedings/iciev/2014/5179/0",
"title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1994/6240/0/00324006",
"title": "Langwidere: a new facial animation system",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1994/00324006/12OmNwErpsG",
"parentPublication": {
"id": "proceedings/ca/1994/6240/0",
"title": "Proceedings of Computer Animation '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b651",
"title": "Extraction and Selection of Muscle Based Features for Facial Expression Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b651/12OmNx4Q6Bx",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iita/2008/3497/1/3497a457",
"title": "Face Tracking in Video Sequences Using Particle Filter Based on Skin Color Model and Facial Contour",
"doi": null,
"abstractUrl": "/proceedings-article/iita/2008/3497a457/12OmNx9WT1h",
"parentPublication": {
"id": "proceedings/iita/2008/3497/3",
"title": "2008 Second International Symposium on Intelligent Information Technology Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/2001/7237/0/00982374",
"title": "A physically-based model with adaptive refinement for facial animation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2001/00982374/12OmNxRWI7R",
"parentPublication": {
"id": "proceedings/ca/2001/7237/0",
"title": "Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2007/1834/0/04458169",
"title": "Automatic Synthesis of Realistic Facial Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2007/04458169/12OmNxvO08Q",
"parentPublication": {
"id": "proceedings/isspit/2007/1834/0",
"title": "2007 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06872553",
"title": "Generating Facial Expressions Using an Anatomically Accurate Biomechanical Model",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06872553/13rRUwI5Uga",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2014/03/06778017",
"title": "Design of a Wearable Device for Reading Positive Expressions from Facial EMG Signals",
"doi": null,
"abstractUrl": "/journal/ta/2014/03/06778017/13rRUyY2937",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2019/0089/0/08756541",
"title": "A Boost in Revealing Subtle Facial Expressions: A Consolidated Eulerian Framework",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2019/08756541/1bzYwpdafPa",
"parentPublication": {
"id": "proceedings/fg/2019/0089/0",
"title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwErpHy",
"title": "Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation",
"acronym": "ca",
"groupId": "1000121",
"volume": "0",
"displayVolume": "0",
"year": "2001",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwcUk1f",
"doi": "10.1109/CA.2001.982372",
"title": "Analysis and synthesis of facial expressions with hand-generated muscle actuation basis",
"normalizedTitle": "Analysis and synthesis of facial expressions with hand-generated muscle actuation basis",
"abstract": "We present a performance-driven facial animation system for analyzing captured expressions to find muscle actuation and synthesizing expressions with the actuation values. A significantly different approach of our work is that we let artists sculpt the initial draft of the actuation basis: the basic facial shapes corresponding to the isolated actuation of individual muscles, instead of calculating skin surface deformation entirely, relying on mathematical models such as finite element methods. We synthesize expressions by linear combinations of the basis elements, and analyze expressions by finding the weights for the combinations. Even though the hand-generated actuation basis represents the essence of the subject's characteristic expressions, it is not accurate enough to be used in the subsequent computational procedures. We also describe an iterative algorithm to increase the accuracy of the actuation basis. The experimental results suggest that our artist-in-the-loop method produces a more predictable and controllable outcome than pure mathematical models, and thus can be a quite useful tool in animation productions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a performance-driven facial animation system for analyzing captured expressions to find muscle actuation and synthesizing expressions with the actuation values. A significantly different approach of our work is that we let artists sculpt the initial draft of the actuation basis: the basic facial shapes corresponding to the isolated actuation of individual muscles, instead of calculating skin surface deformation entirely, relying on mathematical models such as finite element methods. We synthesize expressions by linear combinations of the basis elements, and analyze expressions by finding the weights for the combinations. Even though the hand-generated actuation basis represents the essence of the subject's characteristic expressions, it is not accurate enough to be used in the subsequent computational procedures. We also describe an iterative algorithm to increase the accuracy of the actuation basis. The experimental results suggest that our artist-in-the-loop method produces a more predictable and controllable outcome than pure mathematical models, and thus can be a quite useful tool in animation productions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a performance-driven facial animation system for analyzing captured expressions to find muscle actuation and synthesizing expressions with the actuation values. A significantly different approach of our work is that we let artists sculpt the initial draft of the actuation basis: the basic facial shapes corresponding to the isolated actuation of individual muscles, instead of calculating skin surface deformation entirely, relying on mathematical models such as finite element methods. We synthesize expressions by linear combinations of the basis elements, and analyze expressions by finding the weights for the combinations. Even though the hand-generated actuation basis represents the essence of the subject's characteristic expressions, it is not accurate enough to be used in the subsequent computational procedures. We also describe an iterative algorithm to increase the accuracy of the actuation basis. The experimental results suggest that our artist-in-the-loop method produces a more predictable and controllable outcome than pure mathematical models, and thus can be a quite useful tool in animation productions.",
"fno": "00982372",
"keywords": [
"Computer Animation",
"Muscle",
"Iterative Methods",
"Art",
"Facial Expression Analysis",
"Facial Expression Synthesis",
"Hand Generated Muscle Actuation Basis",
"Performance Driven Facial Animation System",
"Captured Expressions",
"Muscle Actuation",
"Actuation Values",
"Basic Facial Shapes",
"Isolated Actuation",
"Skin Surface Deformation",
"Mathematical Models",
"Finite Element Methods",
"Linear Combinations",
"Hand Generated Actuation Basis",
"Characteristic Expressions",
"Computational Procedures",
"Iterative Algorithm",
"Actuation Basis",
"Artist In The Loop Method",
"Controllable Outcome",
"Pure Mathematical Models",
"Animation Productions",
"Muscles",
"Mathematical Model",
"Shape",
"Facial Animation",
"Finite Element Methods",
"Skin",
"Production",
"Computer Science",
"Iterative Algorithms",
"Humans"
],
"authors": [
{
"affiliation": "Sch. of Electr. Eng. & Comput. Sci., Seoul Nat. Univ., South Korea",
"fullName": "Byoungwon Choe",
"givenName": null,
"surname": "Byoungwon Choe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hyeong-Seok Ko",
"givenName": null,
"surname": "Hyeong-Seok Ko",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ca",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2001-01-01T00:00:00",
"pubType": "proceedings",
"pages": "12,13,14,15,16,17,18,19",
"year": "2001",
"issn": "1087-4844",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00982371",
"articleId": "12OmNCcKQrq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00982373",
"articleId": "12OmNvnfke8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/kse/2009/3846/0/3846a081",
"title": "Fast and Realistic 2D Facial Animation Based on Image Warping",
"doi": null,
"abstractUrl": "/proceedings-article/kse/2009/3846a081/12OmNqGA59e",
"parentPublication": {
"id": "proceedings/kse/2009/3846/0",
"title": "Knowledge and Systems Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607595",
"title": "Muscle-driven modeling of wrinkles for 3D facial expressions",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607595/12OmNrkjVeg",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv-vis/2008/3271/0/3271a135",
"title": "Visualisation Tool for Representing Synthetic Facial Emotional Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/iv-vis/2008/3271a135/12OmNvRU0qD",
"parentPublication": {
"id": "proceedings/iv-vis/2008/3271/0",
"title": "Visualisation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1994/6240/0/00324007",
"title": "Modeling of vascular expressions in facial animation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1994/00324007/12OmNwK7o9I",
"parentPublication": {
"id": "proceedings/ca/1994/6240/0",
"title": "Proceedings of Computer Animation '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284814",
"title": "An Efficient Markerless Method for Resynthesizing Facial Animation on an Anatomy-Based Model",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284814/12OmNx6PiFO",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mnrao/1994/6435/0/00346257",
"title": "Tracking facial motion",
"doi": null,
"abstractUrl": "/proceedings-article/mnrao/1994/00346257/12OmNx7XH2q",
"parentPublication": {
"id": "proceedings/mnrao/1994/6435/0",
"title": "Proceedings of 1994 IEEE Workshop on Motion of Non-rigid and Articulated Objects",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2010/4166/0/4166a009",
"title": "Expressive MPEG-4 Facial Animation Using Quadratic Deformation Models",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2010/4166a009/12OmNxH9Xgx",
"parentPublication": {
"id": "proceedings/cgiv/2010/4166/0",
"title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a270",
"title": "HapFACS: An Open Source API/Software to Generate FACS-Based Expressions for ECAs Animation and for Corpus Generation",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a270/12OmNzXFozx",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/viz/2009/3734/0/3734a061",
"title": "Considerations for Believable Emotional Facial Expression Animation",
"doi": null,
"abstractUrl": "/proceedings-article/viz/2009/3734a061/12OmNzZmZrJ",
"parentPublication": {
"id": "proceedings/viz/2009/3734/0",
"title": "Visualisation, International Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900c475",
"title": "Three Stream Graph Attention Network using Dynamic Patch Selection for the classification of micro-expressions",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900c475/1G571YhxaiQ",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwErpHy",
"title": "Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation",
"acronym": "ca",
"groupId": "1000121",
"volume": "0",
"displayVolume": "0",
"year": "2001",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxRF74d",
"doi": "10.1109/CA.2001.982390",
"title": "Interactive modeling of the human musculature",
"normalizedTitle": "Interactive modeling of the human musculature",
"abstract": "In this paper, we extend our previous work (Proc. Computer Animation and Simulation, pp. 125-135, Aug. 2000) and propose a muscle model that is suitable for computer graphics based on physiological and anatomical considerations. Muscle motion and deformation is automatically derived from one or several action lines, each action line being deformed by a 1D mass-spring system. The resulting model is fast, can accommodate most superficial human muscles, and could easily be integrated into current modeling packages. Example animations can be found at .",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we extend our previous work (Proc. Computer Animation and Simulation, pp. 125-135, Aug. 2000) and propose a muscle model that is suitable for computer graphics based on physiological and anatomical considerations. Muscle motion and deformation is automatically derived from one or several action lines, each action line being deformed by a 1D mass-spring system. The resulting model is fast, can accommodate most superficial human muscles, and could easily be integrated into current modeling packages. Example animations can be found at .",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we extend our previous work (Proc. Computer Animation and Simulation, pp. 125-135, Aug. 2000) and propose a muscle model that is suitable for computer graphics based on physiological and anatomical considerations. Muscle motion and deformation is automatically derived from one or several action lines, each action line being deformed by a 1D mass-spring system. The resulting model is fast, can accommodate most superficial human muscles, and could easily be integrated into current modeling packages. Example animations can be found at .",
"fno": "00982390",
"keywords": [
"Muscle",
"Interactive Systems",
"Digital Simulation",
"Biomechanics",
"Biology Computing",
"Computer Animation",
"Deformation",
"Physiological Models",
"Interactive Modeling",
"Human Musculature",
"Muscle Model",
"Computer Graphics",
"Physiological Considerations",
"Anatomical Considerations",
"Muscle Motion",
"Muscle Deformation",
"Action Lines",
"1 D Mass Spring System",
"Modeling Packages",
"Animation",
"Humans",
"Muscles",
"Skin",
"Ellipsoids",
"Computer Graphics",
"Shape",
"Deformable Models",
"Bones",
"Biological System Modeling",
"Solid Modeling"
],
"authors": [
{
"affiliation": "Comput. Graphics Lab., Swiss Fed. Inst. of Technol., Switzerland",
"fullName": "A. Aubel",
"givenName": "A.",
"surname": "Aubel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "D. Thalmann",
"givenName": "D.",
"surname": "Thalmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ca",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2001-01-01T00:00:00",
"pubType": "proceedings",
"pages": "167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255",
"year": "2001",
"issn": "1087-4844",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00982389",
"articleId": "12OmNyQYt7H",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00982391",
"articleId": "12OmNzmtWw6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2015/7568/0/7568a480",
"title": "Senescence: An Age-Based Character Simulation Framework",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a480/12OmNAOKnW2",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607595",
"title": "Muscle-driven modeling of wrinkles for 3D facial expressions",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607595/12OmNrkjVeg",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2010/8303/0/05703931",
"title": "Modeling of glucose transport in skeletal muscle",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2010/05703931/12OmNwtWfLC",
"parentPublication": {
"id": "proceedings/bibmw/2010/8303/0",
"title": "2010 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvri/2011/0054/0/05759655",
"title": "Automatic skeleton generation and character skinning",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759655/12OmNyqzM1p",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2013/5159/0/06726376",
"title": "The Research and Application of sEMG in Massage Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2013/06726376/12OmNyrIawG",
"parentPublication": {
"id": "proceedings/msn/2013/5159/0",
"title": "2013 Ninth International Conference on Mobile Ad-hoc and Sensor Networks (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890595",
"title": "Modeling a realistic 3D physiological tongue for visual speech synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890595/12OmNyuy9UE",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dim/2005/2327/0/01443264",
"title": "From range data to animated anatomy-based faces: a model adaptation method",
"doi": null,
"abstractUrl": "/proceedings-article/dim/2005/01443264/12OmNzcPAGN",
"parentPublication": {
"id": "proceedings/dim/2005/2327/0",
"title": "Proceedings. Fifth International Conference on 3-D Digital Imaging and Modeling",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2017/10/mco2017100028",
"title": "Interactive Systems Based on Electrical Muscle Stimulation",
"doi": null,
"abstractUrl": "/magazine/co/2017/10/mco2017100028/13rRUILc8aJ",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2021/3965/0/396500a155",
"title": "Key Generation of Biomedical Implanted Antennas Through Artificial Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2021/396500a155/1AIMGpPaQZG",
"parentPublication": {
"id": "proceedings/chase/2021/3965/0",
"title": "2021 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/2005/2327/0/01443264",
"title": "From range data to animated anatomy-based faces: a model adaptation method",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/2005/01443264/1h0H3TUJe9y",
"parentPublication": {
"id": "proceedings/3dim/2005/2327/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAR1b0Z",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxveNUc",
"doi": "10.1109/CVPRW.2017.280",
"title": "DyadGAN: Generating Facial Expressions in Dyadic Interactions",
"normalizedTitle": "DyadGAN: Generating Facial Expressions in Dyadic Interactions",
"abstract": "Generative Adversarial Networks (GANs) have been shown to produce synthetic face images of compelling realism. In this work, we present a conditional GAN approach to generate contextually valid facial expressions in dyadic human interactions. In contrast to previous work employing conditions related to facial attributes of generated identities, we focused on dyads in an attempt to model the relationship and influence of one person’s facial expressions in the reaction of the other. To this end, we introduced a two level optimization of GANs in interviewerinterviewee dyadic interactions. In the first stage we generate face sketches of the interviewer conditioned on facial expressions of the interviewee. The second stage synthesizes complete face images conditioned on the face sketches generated in the first stage. We demonstrated that our model is effective at generating visually compelling face images in dyadic interactions. Moreover we quantitatively showed that the facial expressions depicted in the generated interviewer face images reflect valid emotional reactions to the interviewee behavior.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Generative Adversarial Networks (GANs) have been shown to produce synthetic face images of compelling realism. In this work, we present a conditional GAN approach to generate contextually valid facial expressions in dyadic human interactions. In contrast to previous work employing conditions related to facial attributes of generated identities, we focused on dyads in an attempt to model the relationship and influence of one person’s facial expressions in the reaction of the other. To this end, we introduced a two level optimization of GANs in interviewerinterviewee dyadic interactions. In the first stage we generate face sketches of the interviewer conditioned on facial expressions of the interviewee. The second stage synthesizes complete face images conditioned on the face sketches generated in the first stage. We demonstrated that our model is effective at generating visually compelling face images in dyadic interactions. Moreover we quantitatively showed that the facial expressions depicted in the generated interviewer face images reflect valid emotional reactions to the interviewee behavior.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Generative Adversarial Networks (GANs) have been shown to produce synthetic face images of compelling realism. In this work, we present a conditional GAN approach to generate contextually valid facial expressions in dyadic human interactions. In contrast to previous work employing conditions related to facial attributes of generated identities, we focused on dyads in an attempt to model the relationship and influence of one person’s facial expressions in the reaction of the other. To this end, we introduced a two level optimization of GANs in interviewerinterviewee dyadic interactions. In the first stage we generate face sketches of the interviewer conditioned on facial expressions of the interviewee. The second stage synthesizes complete face images conditioned on the face sketches generated in the first stage. We demonstrated that our model is effective at generating visually compelling face images in dyadic interactions. Moreover we quantitatively showed that the facial expressions depicted in the generated interviewer face images reflect valid emotional reactions to the interviewee behavior.",
"fno": "0733c259",
"keywords": [
"Face",
"Gallium Nitride",
"Generators",
"Interviews",
"Biological System Modeling",
"Avatars",
"Shape"
],
"authors": [
{
"affiliation": null,
"fullName": "Yuchi Huang",
"givenName": "Yuchi",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Saad M. Khan",
"givenName": "Saad M.",
"surname": "Khan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "2259-2266",
"year": "2017",
"issn": "2160-7516",
"isbn": "978-1-5386-0733-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0733c249",
"articleId": "12OmNzcxYW8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0733c267",
"articleId": "12OmNAgoV94",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2017/0563/0/08273626",
"title": "Photorealistic facial expression synthesis by the conditional difference adversarial autoencoder",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2017/08273626/12OmNvD8RE4",
"parentPublication": {
"id": "proceedings/acii/2017/0563/0",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2006/0366/0/04036537",
"title": "Synthesis and Control of High Resolution Facial Expressions for Visual Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2006/04036537/12OmNvjyy3Q",
"parentPublication": {
"id": "proceedings/icme/2006/0366/0",
"title": "2006 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c168",
"title": "Facial Expression Recognition by De-expression Residue Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c168/17D45WIXbNV",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545831",
"title": "Dynamic Facial Expression Synthesis Driven by Deformable Semantic Parts",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545831/17D45WKWnJ1",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000d359",
"title": "Joint Pose and Expression Modeling for Facial Expression Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000d359/17D45XeKgpo",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b626",
"title": "Eyemotion: Classifying Facial Expressions in VR Using Eye-Tracking Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b626/18j8FIomLfi",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200b198",
"title": "SwitchGAN for Multi-domain Facial Image Translation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200b198/1cdOJ82sYU0",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2019/3888/0/08925461",
"title": "Towards Facial De-Expression and Expression Recognition in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2019/08925461/1fHGIFJatWw",
"parentPublication": {
"id": "proceedings/acii/2019/3888/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/02/09117185",
"title": "Dynamic Facial Expression Generation on Hilbert Hypersphere With Conditional Wasserstein Generative Adversarial Nets",
"doi": null,
"abstractUrl": "/journal/tp/2022/02/09117185/1kGfN3QogZq",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a082",
"title": "Unmasking Communication Partners: A Low-Cost AI Solution for Digitally Removing Head-Mounted Displays in VR-Based Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a082/1qpzzJaiYqk",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1bzYnKROnN6",
"title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)",
"acronym": "fg",
"groupId": "1000065",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1bzYwpdafPa",
"doi": "10.1109/FG.2019.8756541",
"title": "A Boost in Revealing Subtle Facial Expressions: A Consolidated Eulerian Framework",
"normalizedTitle": "A Boost in Revealing Subtle Facial Expressions: A Consolidated Eulerian Framework",
"abstract": "Facial Micro-expression Recognition (MER) distinguishes the underlying emotional states of spontaneous subtle facial expressions. Automatic MER is challenging because that the intensity of subtle facial muscle movement is extremely low and the duration of ME is transient.Recent works adopt motion magnification or temporal interpolation to resolve these issues. Nevertheless, existing works divide them into two separate modules due to their non-linearity. Though such operation eases the difficulty in implementation, it ignores their underlying connections and thus results in inevitable losses in both accuracy and speed. Instead, in this paper, we propose a consolidated Eulerian framework to reveal the subtle facial movements. It expands the temporal duration and amplifies the muscle movements in micro-expressions simultaneously. Compared to existing approaches, the proposed method can not only process ME clips more efficiently but also make subtle ME movements more distinguishable. Experiments on two public MER databases indicate that our model outperforms the state-of-the-art in both speed and accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Facial Micro-expression Recognition (MER) distinguishes the underlying emotional states of spontaneous subtle facial expressions. Automatic MER is challenging because that the intensity of subtle facial muscle movement is extremely low and the duration of ME is transient.Recent works adopt motion magnification or temporal interpolation to resolve these issues. Nevertheless, existing works divide them into two separate modules due to their non-linearity. Though such operation eases the difficulty in implementation, it ignores their underlying connections and thus results in inevitable losses in both accuracy and speed. Instead, in this paper, we propose a consolidated Eulerian framework to reveal the subtle facial movements. It expands the temporal duration and amplifies the muscle movements in micro-expressions simultaneously. Compared to existing approaches, the proposed method can not only process ME clips more efficiently but also make subtle ME movements more distinguishable. Experiments on two public MER databases indicate that our model outperforms the state-of-the-art in both speed and accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Facial Micro-expression Recognition (MER) distinguishes the underlying emotional states of spontaneous subtle facial expressions. Automatic MER is challenging because that the intensity of subtle facial muscle movement is extremely low and the duration of ME is transient.Recent works adopt motion magnification or temporal interpolation to resolve these issues. Nevertheless, existing works divide them into two separate modules due to their non-linearity. Though such operation eases the difficulty in implementation, it ignores their underlying connections and thus results in inevitable losses in both accuracy and speed. Instead, in this paper, we propose a consolidated Eulerian framework to reveal the subtle facial movements. It expands the temporal duration and amplifies the muscle movements in micro-expressions simultaneously. Compared to existing approaches, the proposed method can not only process ME clips more efficiently but also make subtle ME movements more distinguishable. Experiments on two public MER databases indicate that our model outperforms the state-of-the-art in both speed and accuracy.",
"fno": "08756541",
"keywords": [
"Emotion Recognition",
"Face Recognition",
"Interpolation",
"Muscle",
"Visual Databases",
"Consolidated Eulerian Framework",
"Spontaneous Subtle Facial Expressions",
"Automatic MER",
"Subtle Facial Muscle Movement",
"Motion Magnification",
"Temporal Interpolation",
"Public MER Databases",
"Facial Microexpression Recognition",
"Emotional States",
"Computational Modeling",
"Interpolation",
"Facial Muscles",
"Databases",
"Face Recognition",
"Feature Extraction",
"Muscles"
],
"authors": [
{
"affiliation": "Center for Machine Vision and Signal Processing, University of Oulu, Finland",
"fullName": "Wei Peng",
"givenName": "Wei",
"surname": "Peng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xian Jiaotong University, Xian, P. R. China",
"fullName": "Xiaopeng Hong",
"givenName": "Xiaopeng",
"surname": "Hong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Machine Vision and Signal Processing, University of Oulu, Finland",
"fullName": "Yingyue Xu",
"givenName": "Yingyue",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Machine Vision and Signal Processing, University of Oulu, Finland",
"fullName": "Guoying Zhao",
"givenName": "Guoying",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-05-01T00:00:00",
"pubType": "proceedings",
"pages": "1-5",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-0089-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08756546",
"articleId": "1bzYxcHC3UA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08756625",
"articleId": "1bzYpFur9YI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2016/1437/0/1437b452",
"title": "Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437b452/12OmNrYlmDm",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2010/02/ttp2010020258",
"title": "A Unified Probabilistic Framework for Spontaneous Facial Action Modeling and Understanding",
"doi": null,
"abstractUrl": "/journal/tp/2010/02/ttp2010020258/13rRUwInvC2",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09684697",
"title": "An Overview of Facial Micro-Expression Analysis: Data, Methodology and Challenge",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09684697/1Agmk0yf2SY",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900c475",
"title": "Three Stream Graph Attention Network using Dynamic Patch Selection for the classification of micro-expressions",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900c475/1G571YhxaiQ",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/04/09915437",
"title": "Deep Learning for Micro-Expression Recognition: A Survey",
"doi": null,
"abstractUrl": "/journal/ta/2022/04/09915437/1HmfM0gSmMU",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956629",
"title": "A Novel Magnification-Robust Network with Sparse Self-Attention for Micro-expression Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956629/1IHoG1sKVig",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956540",
"title": "Seeking Salient Facial Regions for Cross-Database Micro-Expression Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956540/1IHqo3Mexyg",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2020/4272/0/427200a079",
"title": "MER-GCN: Micro-Expression Recognition Based on Relation Modeling with Graph Convolutional Networks",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2020/427200a079/1mAa2bupPgs",
"parentPublication": {
"id": "proceedings/mipr/2020/4272/0",
"title": "2020 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09363624",
"title": "MERASTC: Micro-expression Recognition using Effective Feature Encodings and 2D Convolutional Neural network",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09363624/1rvy7hb582Y",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09645323",
"title": "A Region Group Adaptive Attention Model For Subtle Expression Recognition",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09645323/1zc6lNxHWlG",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCfAPCc",
"title": "2012 16th International Conference on Information Visualisation",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvT2pfp",
"doi": "10.1109/IV.2012.19",
"title": "Visualizing Patterns in Node-link Diagrams",
"normalizedTitle": "Visualizing Patterns in Node-link Diagrams",
"abstract": "Pattern discovery plays an important part in the graph analysis process. Good examples are the detection of communities in social networks or the clustering into pathways of metabolic networks. However, elements may be shared by several clusters, making the patterns entangled. When mining such data, experts are usually interested in both each individual cluster and their overlaps. Dedicated visualization methods are therefore necessary to efficiently support their exploration process. In this article, we propose a new method that emphasizes patterns in a node-link diagram representation and allows to easily identify overlaps between these patterns as well. Our technique combines graph topology and embedding to compute concave hulls with holes surrounding the patterns of interest.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Pattern discovery plays an important part in the graph analysis process. Good examples are the detection of communities in social networks or the clustering into pathways of metabolic networks. However, elements may be shared by several clusters, making the patterns entangled. When mining such data, experts are usually interested in both each individual cluster and their overlaps. Dedicated visualization methods are therefore necessary to efficiently support their exploration process. In this article, we propose a new method that emphasizes patterns in a node-link diagram representation and allows to easily identify overlaps between these patterns as well. Our technique combines graph topology and embedding to compute concave hulls with holes surrounding the patterns of interest.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Pattern discovery plays an important part in the graph analysis process. Good examples are the detection of communities in social networks or the clustering into pathways of metabolic networks. However, elements may be shared by several clusters, making the patterns entangled. When mining such data, experts are usually interested in both each individual cluster and their overlaps. Dedicated visualization methods are therefore necessary to efficiently support their exploration process. In this article, we propose a new method that emphasizes patterns in a node-link diagram representation and allows to easily identify overlaps between these patterns as well. Our technique combines graph topology and embedding to compute concave hulls with holes surrounding the patterns of interest.",
"fno": "4771a048",
"keywords": [
"Overlapping Clustering",
"Pattern Visualization",
"Graph Analysis"
],
"authors": [
{
"affiliation": null,
"fullName": "A. Lambert",
"givenName": "A.",
"surname": "Lambert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "F. Queyroi",
"givenName": "F.",
"surname": "Queyroi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "R. Bourqui",
"givenName": "R.",
"surname": "Bourqui",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-07-01T00:00:00",
"pubType": "proceedings",
"pages": "48-53",
"year": "2012",
"issn": "1550-6037",
"isbn": "978-1-4673-2260-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4771a042",
"articleId": "12OmNzmclwu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4771a054",
"articleId": "12OmNCbU2YS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2008/3268/0/3268a594",
"title": "Visualise Undrawable Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a594/12OmNBOllkb",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csmr/2013/4948/0/4948a047",
"title": "Predicting Project Outcome Leveraging Socio-Technical Network Patterns",
"doi": null,
"abstractUrl": "/proceedings-article/csmr/2013/4948a047/12OmNBSBkci",
"parentPublication": {
"id": "proceedings/csmr/2013/4948/0",
"title": "2013 17th European Conference on Software Maintenance and Reengineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sccompanion/2012/4956/0/4956b457",
"title": "Abstract: Analyzing Patterns in Large-Scale Graphs Using MapReduce in Hadoop",
"doi": null,
"abstractUrl": "/proceedings-article/sccompanion/2012/4956b457/12OmNrNh0Rf",
"parentPublication": {
"id": "proceedings/sccompanion/2012/4956/0",
"title": "2012 SC Companion: High Performance Computing, Networking Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a034",
"title": "Schematization of Node-Link Diagrams and Drawing Techniques for Geo-referenced Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a034/12OmNwEJ0Ua",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2011/9618/0/05718617",
"title": "WordBridge: Using Composite Tag Clouds in Node-Link Diagrams for Visualizing Content and Relations in Text Corpora",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2011/05718617/12OmNwwMf1y",
"parentPublication": {
"id": "proceedings/hicss/2011/9618/0",
"title": "2011 44th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2016/8942/0/8942a045",
"title": "Visualizing Uncertainty of Edge Attributes in Node-Link Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a045/12OmNx38vUi",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010060953",
"title": "eSeeTrack—Visualizing Sequential Fixation Patterns",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010060953/13rRUwInvsJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876036",
"title": "Node, Node-Link, and Node-Link-Group Diagrams: An Evaluation",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876036/13rRUxZ0o1D",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2010/09/ttk2010091203",
"title": "Mining Frequent Subgraph Patterns from Uncertain Graph Data",
"doi": null,
"abstractUrl": "/journal/tk/2010/09/ttk2010091203/13rRUyoPSPr",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxzMnU0",
"title": "2011 15th International Conference on Information Visualisation",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy5R3ry",
"doi": "10.1109/IV.2011.42",
"title": "xLDD: Extended Linguistic Dependency Diagrams",
"normalizedTitle": "xLDD: Extended Linguistic Dependency Diagrams",
"abstract": "Extended Linguistic Dependency Diagrams are an innovative visualization of a data structure that is increasingly important in linguistics and language studies. It uses standard Info V is techniques in ways new to linguistic diagrams to encode more information than is possible with previous visualizations. The goal is to make the diagrams easier to use, by allowing easier identification of the parts of the diagram of interest to the user. In addition, we aim to construct reusable tools to aid in language analysis and study. Preliminary evaluation supports the validity of the approach and suggests further improvements.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Extended Linguistic Dependency Diagrams are an innovative visualization of a data structure that is increasingly important in linguistics and language studies. It uses standard Info V is techniques in ways new to linguistic diagrams to encode more information than is possible with previous visualizations. The goal is to make the diagrams easier to use, by allowing easier identification of the parts of the diagram of interest to the user. In addition, we aim to construct reusable tools to aid in language analysis and study. Preliminary evaluation supports the validity of the approach and suggests further improvements.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Extended Linguistic Dependency Diagrams are an innovative visualization of a data structure that is increasingly important in linguistics and language studies. It uses standard Info V is techniques in ways new to linguistic diagrams to encode more information than is possible with previous visualizations. The goal is to make the diagrams easier to use, by allowing easier identification of the parts of the diagram of interest to the user. In addition, we aim to construct reusable tools to aid in language analysis and study. Preliminary evaluation supports the validity of the approach and suggests further improvements.",
"fno": "06004037",
"keywords": [
"Data Structures",
"Data Visualisation",
"Diagrams",
"Directed Graphs",
"Linguistics",
"X LDD",
"Extended Linguistic Dependency Diagrams",
"Data Structure Visualization",
"Language Study",
"Info Vis",
"Linguistic Diagram",
"Language Analysis",
"Dependency Structure",
"Visualization",
"Pragmatics",
"Data Visualization",
"Encoding",
"Color",
"Data Structures",
"Layout",
"Dependency Diagrams",
"Linguistics"
],
"authors": [
{
"affiliation": null,
"fullName": "Chris Culy",
"givenName": "Chris",
"surname": "Culy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Verena Lyding",
"givenName": "Verena",
"surname": "Lyding",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Henrik Dittmann",
"givenName": "Henrik",
"surname": "Dittmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-07-01T00:00:00",
"pubType": "proceedings",
"pages": "164-169",
"year": "2011",
"issn": "1550-6037",
"isbn": "978-1-4577-0868-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06004036",
"articleId": "12OmNyRPgqb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06004038",
"articleId": "12OmNwJgAHy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dasc-picom-datacom-cyberscitech/2016/4065/0/07588933",
"title": "Logico-linguistic Semantic Representation of Documents",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2016/07588933/12OmNASILDl",
"parentPublication": {
"id": "proceedings/dasc-picom-datacom-cyberscitech/2016/4065/0",
"title": "2016 IEEE 14th Intl Conf on Dependable, Autonomic and Secure Computing, 14th Intl Conf on Pervasive Intelligence and Computing, 2nd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457b032",
"title": "Unsupervised Visual-Linguistic Reference Resolution in Instructional Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b032/12OmNCm7BNq",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2014/5854/0/06934142",
"title": "Linguistic implementations in computer game and virtual world design",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2014/06934142/12OmNrMZpH9",
"parentPublication": {
"id": "proceedings/cgames/2014/5854/0",
"title": "2014 Computer Games: AI, Animation, Mobile, Multimedia, Educational and Serious Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2014/4035/0/06883063",
"title": "Properties of euler diagrams and graphs in combination",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2014/06883063/12OmNvA1hE8",
"parentPublication": {
"id": "proceedings/vlhcc/2014/4035/0",
"title": "2014 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cyberc/2016/5154/0/07864246",
"title": "Multi-Source Heterogeneous Data Recognition Based on Linguistic Labels",
"doi": null,
"abstractUrl": "/proceedings-article/cyberc/2016/07864246/12OmNvrvj71",
"parentPublication": {
"id": "proceedings/cyberc/2016/5154/0",
"title": "2016 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgciot/2015/7910/0/07380712",
"title": "Spherule diagrams: A matrix-based set visualization compared with Euler diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/icgciot/2015/07380712/12OmNyvGyfY",
"parentPublication": {
"id": "proceedings/icgciot/2015/7910/0",
"title": "2015 International Conference on Green Computing and Internet of Things (ICGCIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiaiaai/2014/4174/0/06913370",
"title": "Understanding of Class Diagrams Based on Cognitive Linguistics and Functional Dependency",
"doi": null,
"abstractUrl": "/proceedings-article/iiaiaai/2014/06913370/12OmNz2C1t2",
"parentPublication": {
"id": "proceedings/iiaiaai/2014/4174/0",
"title": "2014 IIAI 3rd International Conference on Advanced Applied Informatics (IIAIAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itei/2021/8050/0/805000a111",
"title": "Applying digital technology to linguistic education: a connectivism-based intelligent learning system",
"doi": null,
"abstractUrl": "/proceedings-article/itei/2021/805000a111/1CzeITpZTdS",
"parentPublication": {
"id": "proceedings/itei/2021/8050/0",
"title": "2021 3rd International Conference on Internet Technology and Educational Informization (ITEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/04/08889698",
"title": "Expressive Authoring of Node-Link Diagrams With Graphies",
"doi": null,
"abstractUrl": "/journal/tg/2021/04/08889698/1eBufwF6gne",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a028",
"title": "Interaction Techniques for Chord Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a028/1rSRcCybLMs",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxWuirg",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAFWONQ",
"doi": "10.1109/VISUAL.2004.73",
"title": "On the Role of Color in the Perception of Motion in Animated Visualizations",
"normalizedTitle": "On the Role of Color in the Perception of Motion in Animated Visualizations",
"abstract": "Although luminance contrast plays a predominant role in motion perception, significant additional effects are introduced by chromatic contrasts. In this paper, relevant results from psychophysical and physiological research are described to clarify the role of color in motion detection. Interpreting these psychophysical experiments, we propose guidelines for the design of animated visualizations, and a calibration procedure that improves the reliability of visual motion representation. The guidelines are applied to examples from texture-based flow visualization, as well as graph and tree visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although luminance contrast plays a predominant role in motion perception, significant additional effects are introduced by chromatic contrasts. In this paper, relevant results from psychophysical and physiological research are described to clarify the role of color in motion detection. Interpreting these psychophysical experiments, we propose guidelines for the design of animated visualizations, and a calibration procedure that improves the reliability of visual motion representation. The guidelines are applied to examples from texture-based flow visualization, as well as graph and tree visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although luminance contrast plays a predominant role in motion perception, significant additional effects are introduced by chromatic contrasts. In this paper, relevant results from psychophysical and physiological research are described to clarify the role of color in motion detection. Interpreting these psychophysical experiments, we propose guidelines for the design of animated visualizations, and a calibration procedure that improves the reliability of visual motion representation. The guidelines are applied to examples from texture-based flow visualization, as well as graph and tree visualization.",
"fno": "87880305",
"keywords": [
"Color",
"Luminance",
"Motion Detection",
"Perception",
"Human Visual System",
"Flow Visualization",
"Information Visualization"
],
"authors": [
{
"affiliation": "University of Stuttgart",
"fullName": "Daniel Weiskopf",
"givenName": "Daniel",
"surname": "Weiskopf",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-10-01T00:00:00",
"pubType": "proceedings",
"pages": "305-312",
"year": "2004",
"issn": null,
"isbn": "0-7803-8788-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "87880297",
"articleId": "12OmNwDSdAl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "87880313",
"articleId": "12OmNApLGKA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/es/2016/3790/0/07880487",
"title": "Recommendations for Data Visualizations Based on Gestalt Patterns",
"doi": null,
"abstractUrl": "/proceedings-article/es/2016/07880487/12OmNCb3ftz",
"parentPublication": {
"id": "proceedings/es/2016/3790/0",
"title": "2016 4th International Conference on Enterprise Systems (ES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660012",
"title": "On the Optimization of Visualizations of Complex Phenomena",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660012/12OmNCfSqN3",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541b017",
"title": "An Effective Image Retrieval Technique Based on Color Perception",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541b017/12OmNqGRG5V",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2009/3733/0/3733a003",
"title": "Visual Perception of Parallel Coordinate Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2009/3733a003/12OmNwGZNMM",
"parentPublication": {
"id": "proceedings/iv/2009/3733/0",
"title": "2009 13th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460962",
"title": "Semantic saliency using k-TR theory of visual perception",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460962/12OmNzdoN5Y",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2013/05/mcs2013050080",
"title": "Towards More Accessible Visualizations for Color-Vision-Deficient Individuals",
"doi": null,
"abstractUrl": "/magazine/cs/2013/05/mcs2013050080/13rRUEgs2Q0",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122516",
"title": "Perception of Visual Variables on Tiled Wall-Sized Displays for Information Visualization Applications",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122516/13rRUwwJWFM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09969167",
"title": "Image-Driven Harmonious Color Palette Generation for Diverse Information Visualization",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09969167/1IMicNIXex2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805431",
"title": "Common Fate for Animated Transitions in Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805431/1cG4F76usA8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwCJOWD",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"acronym": "icassp",
"groupId": "1000002",
"volume": "0",
"displayVolume": "0",
"year": "1991",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCb3fuk",
"doi": "10.1109/ICASSP.1991.150990",
"title": "Global motion identification for image sequence analysis and coding",
"normalizedTitle": "Global motion identification for image sequence analysis and coding",
"abstract": "A new method is proposed to estimate global motion parameters from an unusual initial dense velocity field. The authors first want to obtain a compact representation of a dense velocity field and compute a spatiotemporal motion-based segmentation; then these parameters are used as initial values of a cost-function minimization algorithm and applied within a motion compensation loop for image sequence coding. Promising results are obtained on real TV image sequences. A compact motion representation is generated at each frame, and a quite interpretable qualitative and quantitative motion field is synthesized. Moreover, high quality of reconstruction and motion interpretation is obtained using the minimization stage.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "A new method is proposed to estimate global motion parameters from an unusual initial dense velocity field. The authors first want to obtain a compact representation of a dense velocity field and compute a spatiotemporal motion-based segmentation; then these parameters are used as initial values of a cost-function minimization algorithm and applied within a motion compensation loop for image sequence coding. Promising results are obtained on real TV image sequences. A compact motion representation is generated at each frame, and a quite interpretable qualitative and quantitative motion field is synthesized. Moreover, high quality of reconstruction and motion interpretation is obtained using the minimization stage.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A new method is proposed to estimate global motion parameters from an unusual initial dense velocity field. The authors first want to obtain a compact representation of a dense velocity field and compute a spatiotemporal motion-based segmentation; then these parameters are used as initial values of a cost-function minimization algorithm and applied within a motion compensation loop for image sequence coding. Promising results are obtained on real TV image sequences. A compact motion representation is generated at each frame, and a quite interpretable qualitative and quantitative motion field is synthesized. Moreover, high quality of reconstruction and motion interpretation is obtained using the minimization stage.",
"fno": "00150990",
"keywords": [
"Encoding",
"Parameter Estimation",
"Picture Processing",
"Video Signals",
"Parameter Estimation",
"Global Motion Identification",
"Image Sequence Analysis",
"Global Motion Parameters",
"Dense Velocity Field",
"Spatiotemporal Motion Based Segmentation",
"Cost Function Minimization Algorithm",
"Motion Compensation",
"Image Sequence Coding",
"TV Image Sequences",
"Motion Representation",
"Motion Field",
"Motion Interpretation",
"Motion Analysis",
"Image Sequence Analysis",
"Image Coding",
"Image Sequences",
"Motion Estimation",
"Parameter Estimation",
"Spatiotemporal Phenomena",
"Image Segmentation",
"Minimization Methods",
"Motion Compensation"
],
"authors": [
{
"affiliation": "IRISA/INRIA, Rennes, France",
"fullName": "H. Nicolas",
"givenName": "H.",
"surname": "Nicolas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IRISA/INRIA, Rennes, France",
"fullName": "C. Labit",
"givenName": "C.",
"surname": "Labit",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icassp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1991-01-01T00:00:00",
"pubType": "proceedings",
"pages": "2825,2826,2827,2828",
"year": "1991",
"issn": "1520-6149",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00150989",
"articleId": "12OmNyuy9Nz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00150991",
"articleId": "12OmNwHhoRe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1995/7310/2/73102189",
"title": "Motion estimation for region-based video coding",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73102189/12OmNBrV1RP",
"parentPublication": {
"id": "proceedings/icip/1995/7310/2",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150967",
"title": "A new motion compensation/image sequence coding scheme",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150967/12OmNCwlagl",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06239252",
"title": "Temporally consistent diffeomorphic motion estimation with mutual information: Application to echocardiographic sequences",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239252/12OmNqBKU9m",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150893",
"title": "Monocular motion estimation using a long sequence of noisy images",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150893/12OmNqBtiYx",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/1/00413409",
"title": "Coding image sequence intensities along motion trajectories using EC-CELP quantization",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413409/12OmNvDqsO2",
"parentPublication": {
"id": "proceedings/icip/1994/6952/3",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1996/7358/0/73580112",
"title": "Morphological motion field representation for region-based image sequence coding",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1996/73580112/12OmNvSKNU7",
"parentPublication": {
"id": "proceedings/dcc/1996/7358/0",
"title": "Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118132",
"title": "Multimodal motion estimation and segmentation using Markov random fields",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118132/12OmNxxNbSN",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150995",
"title": "Coherent disparity and motion compensation in 3DTV image sequence coding schemes",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150995/12OmNyVes18",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a041",
"title": "Estimating Apparent Motion on Satellite Acquisitions with a Physical Dynamic Model",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a041/12OmNzUPprr",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1996/01/i0058",
"title": "MDL-Based Segmentation and Motion Modeling in a Long Image Sequence of Scene with Multiple Independently Moving Objects",
"doi": null,
"abstractUrl": "/journal/tp/1996/01/i0058/13rRUyYjK5V",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwCJOWD",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"acronym": "icassp",
"groupId": "1000002",
"volume": "0",
"displayVolume": "0",
"year": "1991",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqBtiYx",
"doi": "10.1109/ICASSP.1991.150893",
"title": "Monocular motion estimation using a long sequence of noisy images",
"normalizedTitle": "Monocular motion estimation using a long sequence of noisy images",
"abstract": "A kinematic model based approach is discussed for the estimation of 3-D motion and structure parameters from a sequence of noisy monocular images. The approach is based on representing the constant velocity translation and constant angular velocity motion using nine rectilinear motion parameters, which are the 3-D vectors of initial position, linear velocity, and angular velocity. The rotational motion is propagated in the kinematic model using the standard 3*3 rotation matrix. The measurements are noisy perturbations of 2-D image locations of feature points. It is assumed that the 3-D feature points are extracted from the images and matched over the frames. The structure of the moving object is represented by the coordinates of feature points in a 3-D coordinate system fixed on the object. A nonlinear least squares method is used to formulate the batch estimation of motion and structure parameters.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "A kinematic model based approach is discussed for the estimation of 3-D motion and structure parameters from a sequence of noisy monocular images. The approach is based on representing the constant velocity translation and constant angular velocity motion using nine rectilinear motion parameters, which are the 3-D vectors of initial position, linear velocity, and angular velocity. The rotational motion is propagated in the kinematic model using the standard 3*3 rotation matrix. The measurements are noisy perturbations of 2-D image locations of feature points. It is assumed that the 3-D feature points are extracted from the images and matched over the frames. The structure of the moving object is represented by the coordinates of feature points in a 3-D coordinate system fixed on the object. A nonlinear least squares method is used to formulate the batch estimation of motion and structure parameters.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A kinematic model based approach is discussed for the estimation of 3-D motion and structure parameters from a sequence of noisy monocular images. The approach is based on representing the constant velocity translation and constant angular velocity motion using nine rectilinear motion parameters, which are the 3-D vectors of initial position, linear velocity, and angular velocity. The rotational motion is propagated in the kinematic model using the standard 3*3 rotation matrix. The measurements are noisy perturbations of 2-D image locations of feature points. It is assumed that the 3-D feature points are extracted from the images and matched over the frames. The structure of the moving object is represented by the coordinates of feature points in a 3-D coordinate system fixed on the object. A nonlinear least squares method is used to formulate the batch estimation of motion and structure parameters.",
"fno": "00150893",
"keywords": [
"Least Squares Approximations",
"Noise",
"Parameter Estimation",
"Picture Processing",
"Monocular Motion Estimation",
"3 D Motion Parameters",
"Long Sequence",
"Noisy Images",
"Kinematic Model Based Approach",
"Structure Parameters",
"Constant Velocity Translation",
"Constant Angular Velocity Motion",
"Rectilinear Motion Parameters",
"Moving Object",
"Nonlinear Least Squares Method",
"Motion Estimation",
"Angular Velocity",
"Robot Kinematics",
"Robustness",
"Parameter Estimation",
"Quaternions",
"Signal Processing",
"Image Processing",
"Vectors",
"Feature Extraction"
],
"authors": [
{
"affiliation": "Dept. of Electr. Eng.-Syst., Univ. of Southern California, Los Angeles, CA, USA",
"fullName": "G.S. Young",
"givenName": "G.S.",
"surname": "Young",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng.-Syst., Univ. of Southern California, Los Angeles, CA, USA",
"fullName": "R. Chellappa",
"givenName": "R.",
"surname": "Chellappa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng.-Syst., Univ. of Southern California, Los Angeles, CA, USA",
"fullName": "T.H. Wu",
"givenName": "T.H.",
"surname": "Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icassp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1991-01-01T00:00:00",
"pubType": "proceedings",
"pages": "2437,2438,2439,2440",
"year": "1991",
"issn": "1520-6149",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00150892",
"articleId": "12OmNxeut3h",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00150894",
"articleId": "12OmNC4eSxf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/robot/1989/1938/0/00100080",
"title": "A quasi-static analysis of dextrous manipulation with sliding and rolling contacts",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1989/00100080/12OmNvA1haF",
"parentPublication": {
"id": "proceedings/robot/1989/1938/0",
"title": "1989 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1991/2148/0/00139804",
"title": "Estimation of motion and structure of planar surfaces from a sequence of monocular images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1991/00139804/12OmNwnH4RZ",
"parentPublication": {
"id": "proceedings/cvpr/1991/2148/0",
"title": "Proceedings. 1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1988/0862/0/00196312",
"title": "3-D motion estimation using a sequence of noisy stereo images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1988/00196312/12OmNyKa61H",
"parentPublication": {
"id": "proceedings/cvpr/1988/0862/0",
"title": "Proceedings CVPR '88: The Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118131",
"title": "Statistical analysis of inherent ambiguities in recovering 3-D motion from a noisy flow field",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118131/12OmNyRxFJQ",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012197",
"title": "On the spatial motion of a rigid body with line contact",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012197/12OmNzmLxL0",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1988/9999/1/00754044",
"title": "Uniqueness Results For Constant Acceleration And Precession Model in 3-DS Motion Estimation Using A Sequence Of Noisy Stereo Images/spl dag/",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1988/00754044/12OmNzw8je4",
"parentPublication": {
"id": "proceedings/acssc/1988/9999/1",
"title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1990/08/i0735",
"title": "3-D Motion Estimation Using a Sequence of Noisy Stereo Images: Models, Estimation, and Uniqueness Results",
"doi": null,
"abstractUrl": "/journal/tp/1990/08/i0735/13rRUxAASX3",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1993/05/i0434",
"title": "The Accuracy of the Computation of Optical Flow and of the Recovery of Motion Parameters",
"doi": null,
"abstractUrl": "/journal/tp/1993/05/i0434/13rRUxBJhwr",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1989/05/i0490",
"title": "Motion Field and Optical Flow: Qualitative Properties",
"doi": null,
"abstractUrl": "/journal/tp/1989/05/i0490/13rRUxBa5od",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a169",
"title": "Development of VR Motion Sickness Test Platform Based on UE",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a169/1H2pqPKjkAg",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxwWorE",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrMZpBS",
"doi": "10.1109/ICCVW.2009.5457629",
"title": "Provably convergent on-line structure and motion estimation for perspective systems",
"normalizedTitle": "Provably convergent on-line structure and motion estimation for perspective systems",
"abstract": "Estimation of structure and motion in computer vision systems can be performed using a dynamic systems approach, where states and parameters in a perspective system are estimated. This paper presents a new approach to the structure estimation problem, where the estimation of the 3D-positions of feature points on a moving object is reformulated as a parameter estimation problem. For each feature point, a constant parameter is estimated, from which it is possible to calculate the time-varying 3D-position. The estimation method is extended to the estimation of motion, in the form of angular velocity estimation. The combined structure and angular velocity estimator is shown stable using Lyapunov theory and persistency of excitation based arguments. The estimation method is illustrated with simulation examples, demonstrating the estimation convergence.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Estimation of structure and motion in computer vision systems can be performed using a dynamic systems approach, where states and parameters in a perspective system are estimated. This paper presents a new approach to the structure estimation problem, where the estimation of the 3D-positions of feature points on a moving object is reformulated as a parameter estimation problem. For each feature point, a constant parameter is estimated, from which it is possible to calculate the time-varying 3D-position. The estimation method is extended to the estimation of motion, in the form of angular velocity estimation. The combined structure and angular velocity estimator is shown stable using Lyapunov theory and persistency of excitation based arguments. The estimation method is illustrated with simulation examples, demonstrating the estimation convergence.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Estimation of structure and motion in computer vision systems can be performed using a dynamic systems approach, where states and parameters in a perspective system are estimated. This paper presents a new approach to the structure estimation problem, where the estimation of the 3D-positions of feature points on a moving object is reformulated as a parameter estimation problem. For each feature point, a constant parameter is estimated, from which it is possible to calculate the time-varying 3D-position. The estimation method is extended to the estimation of motion, in the form of angular velocity estimation. The combined structure and angular velocity estimator is shown stable using Lyapunov theory and persistency of excitation based arguments. The estimation method is illustrated with simulation examples, demonstrating the estimation convergence.",
"fno": "05457629",
"keywords": [
"Computer Vision",
"Image Sequences",
"Motion Estimation",
"Perspective Systems",
"On Line Structure Estimation",
"Computer Vision Systems",
"Parameter Estimation Problem",
"Time Varying 3 D Position",
"Angular Velocity Estimation",
"Lyapunov Theory",
"Motion Estimation",
"Conferences",
"Computer Vision"
],
"authors": [
{
"affiliation": "Centre for Mathematical Sciences, Lund University, Sweden",
"fullName": "Anders Heyden",
"givenName": "Anders",
"surname": "Heyden",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Applied Mathematics Group Malmo Univesrity, Sweden",
"fullName": "Ola Dahl",
"givenName": "Ola",
"surname": "Dahl",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-09-01T00:00:00",
"pubType": "proceedings",
"pages": "751-758",
"year": "2009",
"issn": null,
"isbn": "978-1-4244-4442-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05457628",
"articleId": "12OmNvqW6Zs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05457626",
"articleId": "12OmNBhZ4hy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/1993/3870/0/00378191",
"title": "Motion segmentation and local structure",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1993/00378191/12OmNBJNL1s",
"parentPublication": {
"id": "proceedings/iccv/1993/3870/0",
"title": "1993 (4th) International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wvm/1989/1903/0/00047130",
"title": "Motion from images: image matching, parameter estimation and intrinsic stability",
"doi": null,
"abstractUrl": "/proceedings-article/wvm/1989/00047130/12OmNBWzHQD",
"parentPublication": {
"id": "proceedings/wvm/1989/1903/0",
"title": "Proceedings Workshop on Visual Motion",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1996/7258/0/72580327",
"title": "Structure and motion of curved 3D objects from monocular silhouettes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1996/72580327/12OmNC3Xhcl",
"parentPublication": {
"id": "proceedings/cvpr/1996/7258/0",
"title": "Proceedings CVPR IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150893",
"title": "Monocular motion estimation using a long sequence of noisy images",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150893/12OmNqBtiYx",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118083",
"title": "Estimating motion and structure from line matches: performance obtained and beyond",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118083/12OmNwF0C0x",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1994/5825/0/00323934",
"title": "Motion and structure from one dimensional optical flow",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1994/00323934/12OmNy2Jt5Q",
"parentPublication": {
"id": "proceedings/cvpr/1994/5825/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1988/0862/0/00196312",
"title": "3-D motion estimation using a sequence of noisy stereo images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1988/00196312/12OmNyKa61H",
"parentPublication": {
"id": "proceedings/cvpr/1988/0862/0",
"title": "Proceedings CVPR '88: The Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761895",
"title": "Dynamic structure from motion based on nonlinear adaptive observers",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761895/12OmNzayNtK",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1999/0149/1/01491164",
"title": "Fast, Robust, and Consistent Camera Motion Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1999/01491164/12OmNzw8j3T",
"parentPublication": {
"id": "proceedings/cvpr/1999/0149/2",
"title": "Proceedings. 1999 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (Cat. No PR00149)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1989/05/i0451",
"title": "Motion and Structure From Two Perspective Views: Algorithms, Error Analysis, and Error Estimation",
"doi": null,
"abstractUrl": "/journal/tp/1989/05/i0451/13rRUyY28Zf",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxHrym8",
"title": "Proceedings Workshop on Visual Motion",
"acronym": "wvm",
"groupId": "1000794",
"volume": "0",
"displayVolume": "0",
"year": "1989",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwe2IAV",
"doi": "10.1109/WVM.1989.47119",
"title": "Estimation of 3-D motion and structure based on a temporally-oriented approach with the method of regression",
"normalizedTitle": "Estimation of 3-D motion and structure based on a temporally-oriented approach with the method of regression",
"abstract": "It is argued that the 3-D velocity of a single point up to a scalar factor can be recovered from its 2-D trajectory under the perspective projection. The authors then extend this idea to the recovery of 3-D motion of rigid objects. In both cases measurements are collected through temporal axis first. The analysis is based on the assumption that the 3-D motion of object is smooth so that its 3-D velocity can be approximated as a truncated Taylor series of the predetermined degree. Regression relations between unknown motion parameters and measurements for a single point and rigid body are derived. The method of maximum likelihood is used to estimate the motion. The uniqueness of determining the 3-D motion of the single point is discussed. Experimental results obtained from simulated data and real images are given to illustrate the robustness of this approach.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "It is argued that the 3-D velocity of a single point up to a scalar factor can be recovered from its 2-D trajectory under the perspective projection. The authors then extend this idea to the recovery of 3-D motion of rigid objects. In both cases measurements are collected through temporal axis first. The analysis is based on the assumption that the 3-D motion of object is smooth so that its 3-D velocity can be approximated as a truncated Taylor series of the predetermined degree. Regression relations between unknown motion parameters and measurements for a single point and rigid body are derived. The method of maximum likelihood is used to estimate the motion. The uniqueness of determining the 3-D motion of the single point is discussed. Experimental results obtained from simulated data and real images are given to illustrate the robustness of this approach.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "It is argued that the 3-D velocity of a single point up to a scalar factor can be recovered from its 2-D trajectory under the perspective projection. The authors then extend this idea to the recovery of 3-D motion of rigid objects. In both cases measurements are collected through temporal axis first. The analysis is based on the assumption that the 3-D motion of object is smooth so that its 3-D velocity can be approximated as a truncated Taylor series of the predetermined degree. Regression relations between unknown motion parameters and measurements for a single point and rigid body are derived. The method of maximum likelihood is used to estimate the motion. The uniqueness of determining the 3-D motion of the single point is discussed. Experimental results obtained from simulated data and real images are given to illustrate the robustness of this approach.",
"fno": "00047119",
"keywords": [
"Computer Vision",
"Computerised Pattern Recognition",
"Computerised Picture Processing",
"3 D Motion",
"3 D Structure",
"Motion Analysis",
"Regression Relations",
"Temporally Oriented Approach",
"Regression",
"Perspective Projection",
"Rigid Objects",
"Truncated Taylor Series",
"Unknown Motion Parameters",
"Single Point",
"Rigid Body",
"Maximum Likelihood",
"Motion Estimation",
"Image Motion Analysis",
"Motion Measurement",
"Fluid Flow Measurement",
"Optical Sensors",
"Layout",
"Motion Analysis",
"Taylor Series",
"Computational Modeling",
"Robustness"
],
"authors": [
{
"affiliation": "Grasp Lab., Pennsylvania Univ., Philadelphia, PA, USA",
"fullName": "S.-L. Iu",
"givenName": "S.-L.",
"surname": "Iu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Grasp Lab., Pennsylvania Univ., Philadelphia, PA, USA",
"fullName": "K. Wohn",
"givenName": "K.",
"surname": "Wohn",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wvm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1989-01-01T00:00:00",
"pubType": "proceedings",
"pages": "273,274,275,276,277,278,279,280,281",
"year": "1989",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00047118",
"articleId": "12OmNqyUUs5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00047120",
"articleId": "12OmNzn38MV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2014/7000/1/7000a293",
"title": "Gradient-Based Differential Approach for 3-D Motion Compensation in Interventional 2-D/3-D Image Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a293/12OmNBeRtQV",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1994/6405/2/00471661",
"title": "Determining 3-D hand motion",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1994/00471661/12OmNBpEeWi",
"parentPublication": {
"id": "proceedings/acssc/1994/6405/1",
"title": "Proceedings of 1994 28th Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131740",
"title": "A robust 3-D motion estimation with stereo cameras on a robot manipulator",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131740/12OmNqGA51z",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/1/73100390",
"title": "Object based 3-D motion and structure estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100390/12OmNrYCXSb",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1991/2148/0/00139710",
"title": "Recovery of non-rigid motion and structure",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1991/00139710/12OmNvHY2Gx",
"parentPublication": {
"id": "proceedings/cvpr/1991/2148/0",
"title": "Proceedings. 1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6265/1/00576365",
"title": "Rigid registration of 3-D objects by motion analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576365/12OmNx3q718",
"parentPublication": {
"id": "proceedings/icpr/1994/6265/1",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118131",
"title": "Statistical analysis of inherent ambiguities in recovering 3-D motion from a noisy flow field",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118131/12OmNyRxFJQ",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/1994/6256/0/00315980",
"title": "3-D motion estimation algorithms using 4-D data",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/1994/00315980/12OmNyYm2x9",
"parentPublication": {
"id": "proceedings/cbms/1994/6256/0",
"title": "Proceedings of IEEE Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/1997/7943/0/79430107",
"title": "Frequency Domain Estimation of 3-D Rigid Motion Based on Range and Intensity Data",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/1997/79430107/12OmNz5JCbz",
"parentPublication": {
"id": "proceedings/3dim/1997/7943/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1989/05/i0528",
"title": "A General Aperture Problem for Direct Estimation of 3-D Motion Parameters",
"doi": null,
"abstractUrl": "/journal/tp/1989/05/i0528/13rRUwd9CGO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx7ouUM",
"title": "2013 International Conference on Computational and Information Sciences",
"acronym": "iccis",
"groupId": "1800262",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxXUhUk",
"doi": "10.1109/ICCIS.2013.37",
"title": "An Improved Particle Filter Tracking Algorithm Based on Motion and Appearance Features",
"normalizedTitle": "An Improved Particle Filter Tracking Algorithm Based on Motion and Appearance Features",
"abstract": "Particle filter (PF) has proven successfully for nonlinear and non-Gaussian estimate problems, but its degeneracy will influence the results of tracking. Therefore in the paper, the optical flow algorithm is utilized to generate the proposal distribution of particle filter. With the velocity message which is estimated by optical flow algorithm, the particles could be generated in a right direction. So that, more particles are given a relatively greater weight, which improves the particle degradation. This article proposes the integration of color histogram and wavelet moment into tracking algorithm for improving the veracity of object tracking. An improved particle filter tracking algorithm based on optical flow, color histogram and wavelet moment is proposed in this paper.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Particle filter (PF) has proven successfully for nonlinear and non-Gaussian estimate problems, but its degeneracy will influence the results of tracking. Therefore in the paper, the optical flow algorithm is utilized to generate the proposal distribution of particle filter. With the velocity message which is estimated by optical flow algorithm, the particles could be generated in a right direction. So that, more particles are given a relatively greater weight, which improves the particle degradation. This article proposes the integration of color histogram and wavelet moment into tracking algorithm for improving the veracity of object tracking. An improved particle filter tracking algorithm based on optical flow, color histogram and wavelet moment is proposed in this paper.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Particle filter (PF) has proven successfully for nonlinear and non-Gaussian estimate problems, but its degeneracy will influence the results of tracking. Therefore in the paper, the optical flow algorithm is utilized to generate the proposal distribution of particle filter. With the velocity message which is estimated by optical flow algorithm, the particles could be generated in a right direction. So that, more particles are given a relatively greater weight, which improves the particle degradation. This article proposes the integration of color histogram and wavelet moment into tracking algorithm for improving the veracity of object tracking. An improved particle filter tracking algorithm based on optical flow, color histogram and wavelet moment is proposed in this paper.",
"fno": "5004a110",
"keywords": [
"Computer Vision",
"Optical Imaging",
"Image Motion Analysis",
"Optical Filters",
"Particle Filters",
"Image Color Analysis",
"Target Tracking",
"Wavelet Moment Invariant",
"Human Motion Tracking",
"Particle Filter",
"Optical Flow"
],
"authors": [
{
"affiliation": null,
"fullName": "Lichun Wang",
"givenName": "Lichun",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lanxiao Li",
"givenName": "Lanxiao",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dehui Kong",
"givenName": "Dehui",
"surname": "Kong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-06-01T00:00:00",
"pubType": "proceedings",
"pages": "110-113",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5004-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5004a106",
"articleId": "12OmNvjgWTJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5004a114",
"articleId": "12OmNzayNcC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460939",
"title": "Articulated Particle Filter for hand tracking",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460939/12OmNAR1aYw",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/1/01326048",
"title": "Formant tracking by mixture state particle filter",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326048/12OmNBE7Mp6",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/1",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2012/4687/0/4687a219",
"title": "Object Tracking Based on Fuzzy Information Employing a Particle Filter Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2012/4687a219/12OmNCbU349",
"parentPublication": {
"id": "proceedings/cisis/2012/4687/0",
"title": "2012 Sixth International Conference on Complex, Intelligent, and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2016/2535/0/2535a739",
"title": "Vehicle Tracking Incorporating Low-Rank Sparse into Particle Filter in Haze Scene",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2016/2535a739/12OmNvjgWNV",
"parentPublication": {
"id": "proceedings/icisce/2016/2535/0",
"title": "2016 3rd International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/git4ndm/2013/2269/0/2269a017",
"title": "Particle-Filter Multi-Target Tracking Algorithm Based on Dynamic Salient Features",
"doi": null,
"abstractUrl": "/proceedings-article/git4ndm/2013/2269a017/12OmNwl8GIK",
"parentPublication": {
"id": "proceedings/git4ndm/2013/2269/0",
"title": "2013 Fifth International Conference on Geo-Information Technologies for Natural Disaster Management (GiT4NDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a374",
"title": "Object Tracking with Sparse Representation and Annealed Particle Filter",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a374/12OmNyPQ4QN",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2015/7632/0/07301737",
"title": "Particle filter based Conjoint Individual-Group Tracker (CIGT)",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2015/07301737/12OmNz4SOD6",
"parentPublication": {
"id": "proceedings/avss/2015/7632/0",
"title": "2015 12th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2013/2549/0/06746516",
"title": "Knowledge-Based Cooperative Particle Filter",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2013/06746516/12OmNzuZUBR",
"parentPublication": {
"id": "proceedings/cis/2013/2549/0",
"title": "2013 Ninth International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tcs/2021/2910/0/291000a451",
"title": "Sports Video Target Tracking Algorithm Based on Optimized Particle Swarm Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/tcs/2021/291000a451/1wRIlRTpTIQ",
"parentPublication": {
"id": "proceedings/tcs/2021/2910/0",
"title": "2021 International Conference on Information Technology and Contemporary Sports (TCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2021/2835/0/283500a439",
"title": "An FPGA-based Power-saving Particle Filter Using Dynamic Reconfiguration",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2021/283500a439/1zw5MsymmWI",
"parentPublication": {
"id": "proceedings/candarw/2021/2835/0",
"title": "2021 Ninth International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAYXWAA",
"title": "Proceedings CVPR '88: The Computer Society Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "1988",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyKa61H",
"doi": "10.1109/CVPR.1988.196312",
"title": "3-D motion estimation using a sequence of noisy stereo images",
"normalizedTitle": "3-D motion estimation using a sequence of noisy stereo images",
"abstract": "The authors discuss a kinematic-model-based approach for the estimation of 3-D motion parameters from a sequence of noisy stereo images. The approach consists of representing the constant acceleration translational motion and constant angular velocity or constant precession rotational motion in the form of a bilinear state-space model using standard rectilinear states for translation and quaternions for rotation. Closed-form solutions of state transition equations are obtained to propagate the quaternions in both constant-angular-velocity and constant-precession models. The measurements are noisy perturbations of 3-D feature points represented in an inertial coordinate system. It is assumed that the structure is known and that 3-D feature points are extracted from the stereo images and matched over the frames. Owing to the nonlinearity in the state model, nonlinear filters are designed for the estimation of motion parameters.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The authors discuss a kinematic-model-based approach for the estimation of 3-D motion parameters from a sequence of noisy stereo images. The approach consists of representing the constant acceleration translational motion and constant angular velocity or constant precession rotational motion in the form of a bilinear state-space model using standard rectilinear states for translation and quaternions for rotation. Closed-form solutions of state transition equations are obtained to propagate the quaternions in both constant-angular-velocity and constant-precession models. The measurements are noisy perturbations of 3-D feature points represented in an inertial coordinate system. It is assumed that the structure is known and that 3-D feature points are extracted from the stereo images and matched over the frames. Owing to the nonlinearity in the state model, nonlinear filters are designed for the estimation of motion parameters.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The authors discuss a kinematic-model-based approach for the estimation of 3-D motion parameters from a sequence of noisy stereo images. The approach consists of representing the constant acceleration translational motion and constant angular velocity or constant precession rotational motion in the form of a bilinear state-space model using standard rectilinear states for translation and quaternions for rotation. Closed-form solutions of state transition equations are obtained to propagate the quaternions in both constant-angular-velocity and constant-precession models. The measurements are noisy perturbations of 3-D feature points represented in an inertial coordinate system. It is assumed that the structure is known and that 3-D feature points are extracted from the stereo images and matched over the frames. Owing to the nonlinearity in the state model, nonlinear filters are designed for the estimation of motion parameters.",
"fno": "00196312",
"keywords": [
"Filtering And Prediction Theory",
"Parameter Estimation",
"Picture Processing",
"State Space Methods",
"3 D Motion Estimation",
"Parameter Estimation",
"3 D Feature Point",
"Picture Processing",
"Noisy Stereo Images",
"Translational Motion",
"Angular Velocity",
"Rotational Motion",
"Bilinear State Space Model",
"Nonlinearity",
"State Model",
"Nonlinear Filters",
"Motion Estimation",
"Quaternions",
"Acceleration",
"Angular Velocity",
"Closed Form Solution",
"Equations",
"Coordinate Measuring Machines",
"Feature Extraction",
"Nonlinear Filters",
"Parameter Estimation"
],
"authors": [
{
"affiliation": "Dept. of Electr. Eng. Syst., Univ. of Southern California, Los Angeles, CA, USA",
"fullName": "G.-S. Young",
"givenName": "G.-S.",
"surname": "Young",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng. Syst., Univ. of Southern California, Los Angeles, CA, USA",
"fullName": "R. Chellapa",
"givenName": "R.",
"surname": "Chellapa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1988-01-01T00:00:00",
"pubType": "proceedings",
"pages": "710,711,712,713,714,715,716",
"year": "1988",
"issn": "1063-6919",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00196311",
"articleId": "12OmNwt5sjK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00196313",
"articleId": "12OmNqzcvEr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/1988/0878/0/00028391",
"title": "Recursively estimating optical flow from a noisy image sequence",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1988/00028391/12OmNAY79gJ",
"parentPublication": {
"id": "proceedings/icpr/1988/0878/0",
"title": "9th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118073",
"title": "A Kalman filter approach for accurate 3D motion estimation from a sequence of stereo images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118073/12OmNB8kHSG",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/1/7000a293",
"title": "Gradient-Based Differential Approach for 3-D Motion Compensation in Interventional 2-D/3-D Image Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a293/12OmNBeRtQV",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150893",
"title": "Monocular motion estimation using a long sequence of noisy images",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150893/12OmNqBtiYx",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131740",
"title": "A robust 3-D motion estimation with stereo cameras on a robot manipulator",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131740/12OmNqGA51z",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/1994/6256/0/00315980",
"title": "3-D motion estimation algorithms using 4-D data",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/1994/00315980/12OmNyYm2x9",
"parentPublication": {
"id": "proceedings/cbms/1994/6256/0",
"title": "Proceedings of IEEE Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wvm/1989/1903/0/00047121",
"title": "Using motion from orthographic projections to prune 3-D point matches",
"doi": null,
"abstractUrl": "/proceedings-article/wvm/1989/00047121/12OmNzSh19C",
"parentPublication": {
"id": "proceedings/wvm/1989/1903/0",
"title": "Proceedings Workshop on Visual Motion",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1988/9999/1/00754044",
"title": "Uniqueness Results For Constant Acceleration And Precession Model in 3-DS Motion Estimation Using A Sequence Of Noisy Stereo Images/spl dag/",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1988/00754044/12OmNzw8je4",
"parentPublication": {
"id": "proceedings/acssc/1988/9999/1",
"title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1990/08/i0735",
"title": "3-D Motion Estimation Using a Sequence of Noisy Stereo Images: Models, Estimation, and Uniqueness Results",
"doi": null,
"abstractUrl": "/journal/tp/1990/08/i0735/13rRUxAASX3",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1987/03/04767920",
"title": "3-D Motion Estimation, Understanding, and Prediction from Noisy Image Sequences",
"doi": null,
"abstractUrl": "/journal/tp/1987/03/04767920/13rRUyYBlhq",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx6g6nU",
"title": "Proceedings of the 2003 Winter Simulation Conference",
"acronym": "wsc",
"groupId": "1000674",
"volume": "2",
"displayVolume": "2",
"year": "2003",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzC5SLU",
"doi": "10.1109/WSC.2003.1261597",
"title": "Variable-speed resource motion in animations of discrete-event process models",
"normalizedTitle": "Variable-speed resource motion in animations of discrete-event process models",
"abstract": "We present research that addresses the problem of describing the accurate, variable-speed motion of simulation objects on realistically-shaped trajectories (i.e. paths) in animations of discrete-event simulation models. The work puts in place techniques that modelers can use to instruct virtual simulation objects to follow any arbitrarily-shaped velocity profiles while adhering to fixed motion completion times when traversing along any defined motion path trajectories. A computation scheme that allows simulation models to define the general shapes of relevant velocity profiles and then heuristically scales those profiles to accommodate communicated activity instance durations is presented. While allowing animated simulation objects to be moved with any arbitrarily shaped velocity profiles, this technique ensures that an object's temporospatial control rests entirely with the underlying simulation models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present research that addresses the problem of describing the accurate, variable-speed motion of simulation objects on realistically-shaped trajectories (i.e. paths) in animations of discrete-event simulation models. The work puts in place techniques that modelers can use to instruct virtual simulation objects to follow any arbitrarily-shaped velocity profiles while adhering to fixed motion completion times when traversing along any defined motion path trajectories. A computation scheme that allows simulation models to define the general shapes of relevant velocity profiles and then heuristically scales those profiles to accommodate communicated activity instance durations is presented. While allowing animated simulation objects to be moved with any arbitrarily shaped velocity profiles, this technique ensures that an object's temporospatial control rests entirely with the underlying simulation models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present research that addresses the problem of describing the accurate, variable-speed motion of simulation objects on realistically-shaped trajectories (i.e. paths) in animations of discrete-event simulation models. The work puts in place techniques that modelers can use to instruct virtual simulation objects to follow any arbitrarily-shaped velocity profiles while adhering to fixed motion completion times when traversing along any defined motion path trajectories. A computation scheme that allows simulation models to define the general shapes of relevant velocity profiles and then heuristically scales those profiles to accommodate communicated activity instance durations is presented. While allowing animated simulation objects to be moved with any arbitrarily shaped velocity profiles, this technique ensures that an object's temporospatial control rests entirely with the underlying simulation models.",
"fno": "01261597",
"keywords": [
"Computer Animation",
"Discrete Event Simulation",
"Engineering Graphics",
"Variable Speed Resource Motion",
"Discrete Event Process Model Animation",
"Simulation Object",
"Realistically Shaped Trajectory",
"Virtual Simulation Object",
"Arbitrarily Shaped Velocity Profile",
"Object Temporospatial Control",
"Animation",
"Airplanes",
"Visualization",
"Computational Modeling",
"Roads",
"Kinetic Theory",
"Engines",
"Hoses"
],
"authors": [
{
"affiliation": "Dept. of Civil & Environ. Eng., Michigan Univ., Ann Arbor, MI, USA",
"fullName": "Kamat",
"givenName": null,
"surname": "Kamat",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Martinez",
"givenName": null,
"surname": "Martinez",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wsc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2003-01-01T00:00:00",
"pubType": "proceedings",
"pages": "1518-1526 vol.2",
"year": "2003",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01261596",
"articleId": "12OmNxFJXUN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01261598",
"articleId": "12OmNs5rl01",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cloudcom/2016/1445/0/07830665",
"title": "Towards Green Transportation: Fast Vehicle Velocity Optimization for Fuel Efficiency",
"doi": null,
"abstractUrl": "/proceedings-article/cloudcom/2016/07830665/12OmNBaT61U",
"parentPublication": {
"id": "proceedings/cloudcom/2016/1445/0",
"title": "2016 IEEE International Conference on Cloud Computing Technology and Science (CloudCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crc/2016/3572/0/3572a055",
"title": "Kinematic Optimization of Cam Mechanisms with Variable Input Speed",
"doi": null,
"abstractUrl": "/proceedings-article/crc/2016/3572a055/12OmNBlofTK",
"parentPublication": {
"id": "proceedings/crc/2016/3572/0",
"title": "2016 International Conference on Cybernetics, Robotics and Control (CRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2009/3804/3/3804c575",
"title": "Effect of Ocean Current to Low Velocity Maneuvers of Torpedo-Like Long-Distance AUV",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2009/3804c575/12OmNxEjY6T",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wsc/2001/7307/1/73071291",
"title": "Discrete event fluid modeling of TCP",
"doi": null,
"abstractUrl": "/proceedings-article/wsc/2001/73071291/12OmNxFJXOJ",
"parentPublication": {
"id": "proceedings/wsc/2001/7307/1",
"title": "Proceedings of the 2001 Winter Simulation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/simultech/2014/060/0/07094994",
"title": "The Front Velocity approach in the modelling of Simulated Moving Bed process (SMB)",
"doi": null,
"abstractUrl": "/proceedings-article/simultech/2014/07094994/12OmNxTmHLi",
"parentPublication": {
"id": "proceedings/simultech/2014/060/0",
"title": "2014 International Conference on Simulation and Modeling Methodologies, Technologies and Applications (SIMULTECH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012267",
"title": "Controlling velocity-limited systems to reduce residual vibration",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012267/12OmNy1SFCD",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131958",
"title": "Controllability issues of robots in singular configurations",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131958/12OmNyQYtgE",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/09/06065732",
"title": "Social-Event-Driven Camera Control for Multicharacter Animations",
"doi": null,
"abstractUrl": "/journal/tg/2012/09/06065732/13rRUxAATgw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sose/2019/1442/0/144200a319",
"title": "Decentralized Velocity-Aware Motion Planning for Multi-agent Coordination",
"doi": null,
"abstractUrl": "/proceedings-article/sose/2019/144200a319/19RSAu24m1G",
"parentPublication": {
"id": "proceedings/sose/2019/1442/0",
"title": "2019 IEEE International Conference on Service-Oriented System Engineering (SOSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iseeie/2022/6874/0/687400a172",
"title": "Study on Linear Ccd Acquisition Rate for Variable Speed Moving Objects",
"doi": null,
"abstractUrl": "/proceedings-article/iseeie/2022/687400a172/1FWmFqLtsHu",
"parentPublication": {
"id": "proceedings/iseeie/2022/6874/0",
"title": "2022 International Symposium on Electrical, Electronics and Information Engineering (ISEEIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwHz03E",
"title": "2015 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"acronym": "icitbs",
"groupId": "1811384",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzvhvyc",
"doi": "10.1109/ICITBS.2015.195",
"title": "Study on Non-uniform Bed Load Random Motion",
"normalizedTitle": "Study on Non-uniform Bed Load Random Motion",
"abstract": "In natural river sediment particles movement is of a great deal of randomness, and which is always the focus of the scholars at home and abroad, and is also the research difficulty. The non-uniform sediment flume experiment over rough bed was conducted in this study, the movement process of the non-uniform bed load particles over rough bed was observed with the camera, the parameters such as the motion distance, motion velocity and motion/rest time of bed load particles in the observation period were obtained and analyzed, finally the random motion of bed load particles was studied.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In natural river sediment particles movement is of a great deal of randomness, and which is always the focus of the scholars at home and abroad, and is also the research difficulty. The non-uniform sediment flume experiment over rough bed was conducted in this study, the movement process of the non-uniform bed load particles over rough bed was observed with the camera, the parameters such as the motion distance, motion velocity and motion/rest time of bed load particles in the observation period were obtained and analyzed, finally the random motion of bed load particles was studied.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In natural river sediment particles movement is of a great deal of randomness, and which is always the focus of the scholars at home and abroad, and is also the research difficulty. The non-uniform sediment flume experiment over rough bed was conducted in this study, the movement process of the non-uniform bed load particles over rough bed was observed with the camera, the parameters such as the motion distance, motion velocity and motion/rest time of bed load particles in the observation period were obtained and analyzed, finally the random motion of bed load particles was studied.",
"fno": "0464a771",
"keywords": [
"Sediments",
"Rivers",
"Water Resources",
"Cameras",
"Hydroelectric Power Generation",
"Atmospheric Measurements",
"Motion Measurement",
"Random Motion",
"Non Uniform Bed Load",
"Motion Distance",
"Motion Velocity",
"Motion Rest Time"
],
"authors": [
{
"affiliation": null,
"fullName": "Xu Linjuan",
"givenName": "Xu",
"surname": "Linjuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhou Zhenghui",
"givenName": "Zhou",
"surname": "Zhenghui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qin Juntao",
"givenName": "Qin",
"surname": "Juntao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chen Yuanchao",
"givenName": "Chen",
"surname": "Yuanchao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icitbs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "771-774",
"year": "2015",
"issn": null,
"isbn": "978-1-5090-0464-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0464a767",
"articleId": "12OmNBB0c0C",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0464a775",
"articleId": "12OmNqH9heZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isdea/2013/4893/0/06456672",
"title": "Characteristics of Phosphorus Fractions in Different Sediments of River, Pond and Constructed Wetlands in North of China",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2013/06456672/12OmNA14Ab4",
"parentPublication": {
"id": "proceedings/isdea/2013/4893/0",
"title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2011/4296/1/4296a897",
"title": "Design and Optimization of Swing Bed with High Torque Dynamic Load",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2011/4296a897/12OmNAPSMpt",
"parentPublication": {
"id": "proceedings/icmtma/2011/4296/1",
"title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2016/0943/0/0943a183",
"title": "Motion Scale: A Body Motion Monitoring System Using Bed-Mounted Wireless Load Cells",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2016/0943a183/12OmNxE2mMf",
"parentPublication": {
"id": "proceedings/chase/2016/0943/0",
"title": "2016 IEEE First International Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings/2014/5967/0/5967a360",
"title": "An In-Situ Motion Measurement System for Underwater Sediments Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ithings/2014/5967a360/12OmNxEBzdD",
"parentPublication": {
"id": "proceedings/ithings/2014/5967/0",
"title": "2014 IEEE International Conference on Internet of Things(iThings), and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2015/7644/0/7644a933",
"title": "The Application of Edge Detection Algorithm in Bedload Contour Extraction",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2015/7644a933/12OmNxw5B9x",
"parentPublication": {
"id": "proceedings/icicta/2015/7644/0",
"title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2013/4893/0/06456676",
"title": "Characteristics and Evaluation for Nitrogen Pollution in Water and Surface Sediments of Xixi Wetland",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2013/06456676/12OmNyL0TFJ",
"parentPublication": {
"id": "proceedings/isdea/2013/4893/0",
"title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fskd/2008/3305/3/04666309",
"title": "Fuzzy Comprehensive Evaluation Model for Chinese Sturgeon Habitat Suitability in the Yangtze River",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2008/04666309/12OmNySXEXk",
"parentPublication": {
"id": "proceedings/fskd/2008/3305/3",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icscse/2018/1366/0/08705403",
"title": "Retracted: Review on The Movement of Bed Load Particles in Non-Uniform Sediment",
"doi": null,
"abstractUrl": "/proceedings-article/icscse/2018/08705403/19RSmRLD8Gs",
"parentPublication": {
"id": "proceedings/icscse/2018/1366/0",
"title": "2018 3rd International Conference on Smart City and Systems Engineering (ICSCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icore/2022/3390/0/339000a157",
"title": "Estimation of the Sediment Discharge of Marikina River Basin using Hydrologic Modeling System (HEC-HMS)",
"doi": null,
"abstractUrl": "/proceedings-article/icore/2022/339000a157/1LSOTc4diM0",
"parentPublication": {
"id": "proceedings/icore/2022/3390/0",
"title": "2022 2nd International Conference in Information and Computing Research (iCORE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2021/3596/0/359600a139",
"title": "Design of a sampling device for shallow water sediment",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2021/359600a139/1tMPO6Fvub6",
"parentPublication": {
"id": "proceedings/icedme/2021/3596/0",
"title": "2021 4th International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwKoZdc",
"title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers",
"acronym": "acssc",
"groupId": "1000671",
"volume": "1",
"displayVolume": "1",
"year": "1988",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzw8je4",
"doi": "10.1109/ACSSC.1988.754044",
"title": "Uniqueness Results For Constant Acceleration And Precession Model in 3-DS Motion Estimation Using A Sequence Of Noisy Stereo Images/spl dag/",
"normalizedTitle": "Uniqueness Results For Constant Acceleration And Precession Model in 3-DS Motion Estimation Using A Sequence Of Noisy Stereo Images/spl dag/",
"abstract": "We discuss uniqueness of motion parameter estimates in the kinematic model based approach presented in [1, 2] for the constant acceleration and precession motion model. The kinematic model is designed for the estimation of 3-D motion parameters from a sequence of noisy stereo images. A constructive proof of uniqueness is given by separating the rotational parameters from the translational parameters. If an uniform sampling scheme is applied, we show that three noncollinear feature points in five consecutive binocular image pairs contain all the spatial and temporal information. Furthermore, while the velocity and acceleration parameters are unique, the solution sets have seven configurations in the rotational parameters and three configurations in the translational position vector and the rotation center due to the stroboscopic effect, which is not encountered in two-view motion analysis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We discuss uniqueness of motion parameter estimates in the kinematic model based approach presented in [1, 2] for the constant acceleration and precession motion model. The kinematic model is designed for the estimation of 3-D motion parameters from a sequence of noisy stereo images. A constructive proof of uniqueness is given by separating the rotational parameters from the translational parameters. If an uniform sampling scheme is applied, we show that three noncollinear feature points in five consecutive binocular image pairs contain all the spatial and temporal information. Furthermore, while the velocity and acceleration parameters are unique, the solution sets have seven configurations in the rotational parameters and three configurations in the translational position vector and the rotation center due to the stroboscopic effect, which is not encountered in two-view motion analysis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We discuss uniqueness of motion parameter estimates in the kinematic model based approach presented in [1, 2] for the constant acceleration and precession motion model. The kinematic model is designed for the estimation of 3-D motion parameters from a sequence of noisy stereo images. A constructive proof of uniqueness is given by separating the rotational parameters from the translational parameters. If an uniform sampling scheme is applied, we show that three noncollinear feature points in five consecutive binocular image pairs contain all the spatial and temporal information. Furthermore, while the velocity and acceleration parameters are unique, the solution sets have seven configurations in the rotational parameters and three configurations in the translational position vector and the rotation center due to the stroboscopic effect, which is not encountered in two-view motion analysis.",
"fno": "00754044",
"keywords": [
"Acceleration",
"Motion Estimation",
"Kinematics",
"Motion Analysis",
"Image Sampling",
"Image Motion Analysis",
"Motion Measurement",
"Angular Velocity",
"Rotation Measurement",
"Time Measurement"
],
"authors": [
{
"affiliation": "Signal and Image Processing Institute",
"fullName": "Gem-Sun Young",
"givenName": null,
"surname": "Gem-Sun Young",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acssc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1988-01-01T00:00:00",
"pubType": "proceedings",
"pages": "500,501,502,503,504,505,506",
"year": "1988",
"issn": "1058-6393",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00754043",
"articleId": "12OmNxFaLiM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00754045",
"articleId": "12OmNzRHOQF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wvm/1989/1903/0/00047090",
"title": "Experiments and uniqueness results on object structure and kinematics from a sequence of monocular images",
"doi": null,
"abstractUrl": "/proceedings-article/wvm/1989/00047090/12OmNqyUUEF",
"parentPublication": {
"id": "proceedings/wvm/1989/1903/0",
"title": "Proceedings Workshop on Visual Motion",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00132056",
"title": "Computationally inexpensive egomotion determination for a mobile robot using an active camera",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00132056/12OmNvFpExR",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wvm/1991/2153/0/00212779",
"title": "A fast subspace algorithm for recovering rigid motion",
"doi": null,
"abstractUrl": "/proceedings-article/wvm/1991/00212779/12OmNwD1q0y",
"parentPublication": {
"id": "proceedings/wvm/1991/2153/0",
"title": "Proceedings of the IEEE Workshop on Visual Motion",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wvm/1991/2153/0/00212782",
"title": "Recovering observer translation with center-surround operators",
"doi": null,
"abstractUrl": "/proceedings-article/wvm/1991/00212782/12OmNx2QUEl",
"parentPublication": {
"id": "proceedings/wvm/1991/2153/0",
"title": "Proceedings of the IEEE Workshop on Visual Motion",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00340995",
"title": "Parallel dense depth-from-motion on the image understanding architecture",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00340995/12OmNxUdv5n",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/2/73102193",
"title": "Binocular estimation of motion and structure from long sequences using optical flow without correspondence",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73102193/12OmNxVV5UM",
"parentPublication": {
"id": "proceedings/icip/1995/7310/2",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6265/1/00576386",
"title": "Stereoscopic recovery of egomotion and structure: models, uniqueness and experimental results",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576386/12OmNxxdZDc",
"parentPublication": {
"id": "proceedings/icpr/1994/6265/1",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1988/0862/0/00196312",
"title": "3-D motion estimation using a sequence of noisy stereo images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1988/00196312/12OmNyKa61H",
"parentPublication": {
"id": "proceedings/cvpr/1988/0862/0",
"title": "Proceedings CVPR '88: The Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1997/7822/0/78220250",
"title": "The confounding of translation and rotation in reconstruction from multiple views",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1997/78220250/12OmNyQphkA",
"parentPublication": {
"id": "proceedings/cvpr/1997/7822/0",
"title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1990/08/i0735",
"title": "3-D Motion Estimation Using a Sequence of Noisy Stereo Images: Models, Estimation, and Uniqueness Results",
"doi": null,
"abstractUrl": "/journal/tp/1990/08/i0735/13rRUxAASX3",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBSBk6v",
"title": "Dagstuhl '97 - Scientific Visualization Conference",
"acronym": "dagstuhl",
"groupId": "1811924",
"volume": "0",
"displayVolume": "0",
"year": "1997",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAXglN4",
"doi": "10.1109/DAGSTUHL.1997.10014",
"title": "Notes on Computational-Space-Based Ray-Casting for Curvilinear Volumes",
"normalizedTitle": "Notes on Computational-Space-Based Ray-Casting for Curvilinear Volumes",
"abstract": "In this paper we study the computational-space-based (C-space-based) ray-casting algorithm for rendering curvilinear volumes. With a simple counter example, we demonstrate that a proposed C-space-based method may not generate rendering results as accurately as ray-casting in the physical space. We also analyze what needs to be improved and discuss several other issues related to the C-space-based approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we study the computational-space-based (C-space-based) ray-casting algorithm for rendering curvilinear volumes. With a simple counter example, we demonstrate that a proposed C-space-based method may not generate rendering results as accurately as ray-casting in the physical space. We also analyze what needs to be improved and discuss several other issues related to the C-space-based approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we study the computational-space-based (C-space-based) ray-casting algorithm for rendering curvilinear volumes. With a simple counter example, we demonstrate that a proposed C-space-based method may not generate rendering results as accurately as ray-casting in the physical space. We also analyze what needs to be improved and discuss several other issues related to the C-space-based approach.",
"fno": "05030124",
"keywords": [
"Volume Rendering",
"Irregular Grid",
"Curvilinear Grid",
"Ray Casting",
"Computational Space",
"Physical Space",
"Scientific Visualization"
],
"authors": [
{
"affiliation": "Bell Laboratories, Lucent Technologies",
"fullName": "Lichan Hong",
"givenName": "Lichan",
"surname": "Hong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State University of New York at Stony Brook",
"fullName": "Arie Kaufman",
"givenName": "Arie",
"surname": "Kaufman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dagstuhl",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1997-06-01T00:00:00",
"pubType": "proceedings",
"pages": "124",
"year": "1997",
"issn": null,
"isbn": "0-7695-0503-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01423125",
"articleId": "12OmNzgeLD5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01423126",
"articleId": "1h0N3JNX6cE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iscsct/2008/3498/2/3498b783",
"title": "An Octree Ray Casting Algorithm Based on Multi-core CPUs",
"doi": null,
"abstractUrl": "/proceedings-article/iscsct/2008/3498b783/12OmNAgGwg5",
"parentPublication": {
"id": "proceedings/iscsct/2008/3498/1",
"title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2008/1966/0/04475452",
"title": "Efficient Rendering of Extrudable Curvilinear Volumes",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2008/04475452/12OmNBKW9AV",
"parentPublication": {
"id": "proceedings/pacificvis/2008/1966/0",
"title": "IEEE Pacific Visualization Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/1998/9180/0/91800055",
"title": "Adaptive Perspective Ray Casting",
"doi": null,
"abstractUrl": "/proceedings-article/vv/1998/91800055/12OmNBRsVxg",
"parentPublication": {
"id": "proceedings/vv/1998/9180/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2011/9222/0/05772418",
"title": "An Image-Ordered Parallel Volume Ray Casting Using Frame Coherence",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2011/05772418/12OmNvF83nd",
"parentPublication": {
"id": "proceedings/icisa/2011/9222/0",
"title": "2011 International Conference on Information Science and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1998/9176/0/91760247",
"title": "Accelerated Ray-Casting for Curvilinear Volumes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1998/91760247/12OmNyoAA7g",
"parentPublication": {
"id": "proceedings/ieee-vis/1998/9176/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06851204",
"title": "Study of a Ray Casting Technique for the Visualization of Deformable Volumes",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06851204/13rRUEgarBv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1997/02/v0142",
"title": "The Lazy Sweep Ray Casting Algorithm for Rendering Irregular Grids",
"doi": null,
"abstractUrl": "/journal/tg/1997/02/v0142/13rRUxASu0A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/04/v0322",
"title": "Fast Projection-Based Ray-Casting Algorithm for Rendering Curvilinear Volumes",
"doi": null,
"abstractUrl": "/journal/tg/1999/04/v0322/13rRUyY294r",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccgiv/2022/9250/0/925000a183",
"title": "Ellipsoidal ray casting algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iccgiv/2022/925000a183/1LxfqGjszTi",
"parentPublication": {
"id": "proceedings/iccgiv/2022/9250/0",
"title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dagstuhl/1997/0503/0/01423109",
"title": "Notes on Computational-Space-Based Ray-Casting for Curvilinear Volumes",
"doi": null,
"abstractUrl": "/proceedings-article/dagstuhl/1997/01423109/1h0N2FRU8Tu",
"parentPublication": {
"id": "proceedings/dagstuhl/1997/0503/0",
"title": "Dagstuhl '97 - Scientific Visualization Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyfdOIg",
"title": "Frontiers of Massively Parallel Processing, Symposium on the",
"acronym": "frontiers",
"groupId": "1000299",
"volume": "0",
"displayVolume": "0",
"year": "1995",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxisQY8",
"doi": "10.1109/FMPC.1995.380443",
"title": "An optimal parallel algorithm for volume ray casting",
"normalizedTitle": "An optimal parallel algorithm for volume ray casting",
"abstract": "Volume rendering by ray casting is a computationally expensive problem. For interactive volume visualization, rendering has to be done in real time (30 frames/sec). Since the typical 3-D dataset size is 256/sup 3/ the use of parallel processing is imperative. In this paper, we present an O(log n) EREW algorithm for volume rendering using O(n/sup 3/) processors which can be optimized to O(log/sup 3/ n) time using O(n/sup 3//log/sup 3/ n) processors. We have implemented our algorithm on MasPar MP1200. The implementation results show that a frame from 256/sup 3/ data size is generated in 11 seconds using 4096 processors. This time can be further reduced by using a large number of processors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Volume rendering by ray casting is a computationally expensive problem. For interactive volume visualization, rendering has to be done in real time (30 frames/sec). Since the typical 3-D dataset size is 256/sup 3/ the use of parallel processing is imperative. In this paper, we present an O(log n) EREW algorithm for volume rendering using O(n/sup 3/) processors which can be optimized to O(log/sup 3/ n) time using O(n/sup 3//log/sup 3/ n) processors. We have implemented our algorithm on MasPar MP1200. The implementation results show that a frame from 256/sup 3/ data size is generated in 11 seconds using 4096 processors. This time can be further reduced by using a large number of processors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Volume rendering by ray casting is a computationally expensive problem. For interactive volume visualization, rendering has to be done in real time (30 frames/sec). Since the typical 3-D dataset size is 256/sup 3/ the use of parallel processing is imperative. In this paper, we present an O(log n) EREW algorithm for volume rendering using O(n/sup 3/) processors which can be optimized to O(log/sup 3/ n) time using O(n/sup 3//log/sup 3/ n) processors. We have implemented our algorithm on MasPar MP1200. The implementation results show that a frame from 256/sup 3/ data size is generated in 11 seconds using 4096 processors. This time can be further reduced by using a large number of processors.",
"fno": "69650238",
"keywords": [
"Rendering Computer Graphics Parallel Algorithms Parallel Algorithm Volume Ray Casting Interactive Volume Visualization Rendering EREW Algorithm Volume Rendering Ray Casting Volume Visualization"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci., Central Florida Univ., Orlando, FL, USA",
"fullName": "V. Goel",
"givenName": "V.",
"surname": "Goel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Central Florida Univ., Orlando, FL, USA",
"fullName": "A. Mukherjee",
"givenName": "A.",
"surname": "Mukherjee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "frontiers",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1995-02-01T00:00:00",
"pubType": "proceedings",
"pages": "238",
"year": "1995",
"issn": null,
"isbn": "0-8186-6965-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "69650231",
"articleId": "12OmNvA1hDY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "69650246",
"articleId": "12OmNzh5z3P",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrIJqwx",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"acronym": "icdh",
"groupId": "1802037",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAhxjB9",
"doi": "10.1109/ICDH.2014.27",
"title": "Image Edit by Boundary Difference Propagation",
"normalizedTitle": "Image Edit by Boundary Difference Propagation",
"abstract": "This paper introduces a novel approach for image edit. Instead of solving poisson equation, image edit task can be treated as in painting the region of interest by propagating the boundary differences between target and source image. Based on the Fast Marching Method, differences along the boundary can be progressively eliminated to interior layer by layer, and we can get a seamless edit result. It is very simple to implement. Incorporating mixed saliency map, we further improve our edit model to achieve better and more realistic results for images with salient background structure. The experimental results are provided to demonstrate the performance of the algorithms.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a novel approach for image edit. Instead of solving poisson equation, image edit task can be treated as in painting the region of interest by propagating the boundary differences between target and source image. Based on the Fast Marching Method, differences along the boundary can be progressively eliminated to interior layer by layer, and we can get a seamless edit result. It is very simple to implement. Incorporating mixed saliency map, we further improve our edit model to achieve better and more realistic results for images with salient background structure. The experimental results are provided to demonstrate the performance of the algorithms.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a novel approach for image edit. Instead of solving poisson equation, image edit task can be treated as in painting the region of interest by propagating the boundary differences between target and source image. Based on the Fast Marching Method, differences along the boundary can be progressively eliminated to interior layer by layer, and we can get a seamless edit result. It is very simple to implement. Incorporating mixed saliency map, we further improve our edit model to achieve better and more realistic results for images with salient background structure. The experimental results are provided to demonstrate the performance of the algorithms.",
"fno": "4284a101",
"keywords": [
"Mathematical Model",
"Image Color Analysis",
"Boundary Conditions",
"Vectors",
"Poisson Equations",
"Equations",
"Laplace Equations",
"Saliency",
"Image Edit",
"FMM Fast Marching Method"
],
"authors": [
{
"affiliation": null,
"fullName": "Pan Qi",
"givenName": "Pan",
"surname": "Qi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdh",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-11-01T00:00:00",
"pubType": "proceedings",
"pages": "101-104",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4284-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4284a095",
"articleId": "12OmNAQJzOw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4284a105",
"articleId": "12OmNAMtAPA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cis/2014/7434/0/7434a343",
"title": "The Hilbert Boundary Value Problem for Beltrami Equation in Clifford Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2014/7434a343/12OmNBKmXnZ",
"parentPublication": {
"id": "proceedings/cis/2014/7434/0",
"title": "2014 Tenth International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1991/2148/0/00139751",
"title": "Boundary element methods for solving Poisson equations in computer vision problems",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1991/00139751/12OmNBd9T57",
"parentPublication": {
"id": "proceedings/cvpr/1991/2148/0",
"title": "Proceedings. 1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icqnm/2010/3952/0/3952a096",
"title": "Combination of Boundary Singularity and Direct Simulation Monte Carlo Methods for Nano-scale Flows",
"doi": null,
"abstractUrl": "/proceedings-article/icqnm/2010/3952a096/12OmNrNh0DY",
"parentPublication": {
"id": "proceedings/icqnm/2010/3952/0",
"title": "Quantum, Nano, and Micro Technologies, First International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ewdts/2013/2096/0/06673202",
"title": "Conservative finite-difference scheme for the problem of laser pulse propagation in a medium with third-order dispersion",
"doi": null,
"abstractUrl": "/proceedings-article/ewdts/2013/06673202/12OmNvjgWnP",
"parentPublication": {
"id": "proceedings/ewdts/2013/2096/0",
"title": "2013 11th East-West Design and Test Symposium (EWDTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223239",
"title": "On Poisson solvers and semi-direct methods for computing area based optical flow",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223239/12OmNwbuke1",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/synasc/2014/8447/0/07034672",
"title": "Optimal Homotopy Asymptotic Method for Viscous Boundary Layer Flow in Unbounded Domain",
"doi": null,
"abstractUrl": "/proceedings-article/synasc/2014/07034672/12OmNxE2n33",
"parentPublication": {
"id": "proceedings/synasc/2014/8447/0",
"title": "2014 16th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2008/3278/0/3278a397",
"title": "Image Editing without Color Inconsistency Using Modified Poisson Equation",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2008/3278a397/12OmNyoiYXH",
"parentPublication": {
"id": "proceedings/iih-msp/2008/3278/0",
"title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2006/04/c4032",
"title": "Numerical Solution of the Stationary State Schrödinger Equation Using Transparent Boundary Conditions",
"doi": null,
"abstractUrl": "/magazine/cs/2006/04/c4032/13rRUEgs2PW",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06774477",
"title": "The Natural Helmholtz-Hodge Decomposition for Open-Boundary Flow Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06774477/13rRUxYrbUJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icftic/2022/2195/0/10075200",
"title": "Study on the Fourth Order Discrete Scheme of Finite Difference Method Based on Node Set Vector for Two Dimensional Poisson Equation",
"doi": null,
"abstractUrl": "/proceedings-article/icftic/2022/10075200/1LRl0zXfIJi",
"parentPublication": {
"id": "proceedings/icftic/2022/2195/0",
"title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzSh1ax",
"title": "Proceedings. 1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "1991",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBd9T57",
"doi": "10.1109/CVPR.1991.139751",
"title": "Boundary element methods for solving Poisson equations in computer vision problems",
"normalizedTitle": "Boundary element methods for solving Poisson equations in computer vision problems",
"abstract": "The boundary element method (BEM) for solving Poisson's equations is described. Issues in BEM, such as Green's functions, boundary conditions, evaluation of improper integrals, and continuity up to first derivative of solution functions, are discussed. BEM is compared with FEM, the finite element method, in terms of storage and time complexity. The authors discuss application to vision: height from gradient; shape from shading; surface interpolation; brightness based stereo matching; and the optical flow problem. Brief mention is made of some early experimental results on synthetic images.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The boundary element method (BEM) for solving Poisson's equations is described. Issues in BEM, such as Green's functions, boundary conditions, evaluation of improper integrals, and continuity up to first derivative of solution functions, are discussed. BEM is compared with FEM, the finite element method, in terms of storage and time complexity. The authors discuss application to vision: height from gradient; shape from shading; surface interpolation; brightness based stereo matching; and the optical flow problem. Brief mention is made of some early experimental results on synthetic images.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The boundary element method (BEM) for solving Poisson's equations is described. Issues in BEM, such as Green's functions, boundary conditions, evaluation of improper integrals, and continuity up to first derivative of solution functions, are discussed. BEM is compared with FEM, the finite element method, in terms of storage and time complexity. The authors discuss application to vision: height from gradient; shape from shading; surface interpolation; brightness based stereo matching; and the optical flow problem. Brief mention is made of some early experimental results on synthetic images.",
"fno": "00139751",
"keywords": [
"Boundary Elements Methods",
"Computer Vision",
"Poisson Equations",
"Computer Vision",
"Boundary Element Method",
"BEM",
"Greens Functions",
"Boundary Conditions",
"Improper Integrals",
"Continuity",
"FEM",
"Finite Element Method",
"Time Complexity",
"Height From Gradient",
"Shape From Shading",
"Surface Interpolation",
"Brightness Based Stereo Matching",
"Optical Flow",
"Synthetic Images",
"Boundary Element Methods",
"Poisson Equations",
"Greens Function Methods",
"Boundary Conditions",
"Integral Equations",
"Finite Element Methods",
"Shape",
"Interpolation",
"Brightness",
"Image Motion Analysis"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci., Worcester Polytech. Inst., MA, USA",
"fullName": "G.G. Gu",
"givenName": "G.G.",
"surname": "Gu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Worcester Polytech. Inst., MA, USA",
"fullName": "M.A. Gennert",
"givenName": "M.A.",
"surname": "Gennert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1991-01-01T00:00:00",
"pubType": "proceedings",
"pages": "546,547,548,549,550,551",
"year": "1991",
"issn": "1063-6919",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00139750",
"articleId": "12OmNzZmZvi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00139752",
"articleId": "12OmNyTfg7h",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dac/1998/964/0/00724470",
"title": "Boundary element method macromodels for 2-D hierarchical capacitance extraction",
"doi": null,
"abstractUrl": "/proceedings-article/dac/1998/00724470/12OmNqIhFXm",
"parentPublication": {
"id": "proceedings/dac/1998/964/0",
"title": "Proceedings 1998 Design and Automation Conference. 35th DAC",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccad/2002/7607/0/01167506",
"title": "Comprehensive frequency-dependent substrate noise analysis using boundary element methods",
"doi": null,
"abstractUrl": "/proceedings-article/iccad/2002/01167506/12OmNrY3LBw",
"parentPublication": {
"id": "proceedings/iccad/2002/7607/0",
"title": "2002 IEEE/ACM International Conference on Computer Aided Design (ICCAD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asp-dac/2000/2311/0/23110447",
"title": "Hierarchical Computation of 3-D Interconnect Capacitance using Direct Boundary Element Method",
"doi": null,
"abstractUrl": "/proceedings-article/asp-dac/2000/23110447/12OmNs0TKVj",
"parentPublication": {
"id": "proceedings/asp-dac/2000/2311/0",
"title": "Asia and South Pacific Design Automation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asp-dac/1999/2329/0/23290093",
"title": "The Hierarchical h-Adaptive 3-D Boundary Element Computation of VLSI Interconnect Capacitance",
"doi": null,
"abstractUrl": "/proceedings-article/asp-dac/1999/23290093/12OmNwErpCI",
"parentPublication": {
"id": "proceedings/asp-dac/1999/2329/0",
"title": "Asia and South Pacific Design Automation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2920/0/00202092",
"title": "A probabilistic approach for solving Poisson equations in computer vision problems",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00202092/12OmNyQGS77",
"parentPublication": {
"id": "proceedings/icpr/1992/2920/0",
"title": "11th IAPR International Conference on Pattern Recognition. Vol. III. Conference C: Image, Speech and Signal Analysis,",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2009/3605/2/3605c652",
"title": "Three Dimension Multi-body Contact Boundary Element Method",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2009/3605c652/12OmNypIYEW",
"parentPublication": {
"id": "proceedings/cso/2009/3605/2",
"title": "2009 International Joint Conference on Computational Sciences and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/2/73102430",
"title": "Regularisation of the limited data computed tomography problem via the boundary element method",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73102430/12OmNz4SODZ",
"parentPublication": {
"id": "proceedings/icip/1995/7310/2",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a005",
"title": "Boundary Element Method for Diffuse Optical Tomography",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a005/12OmNzfXawp",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2015/7644/0/7644a257",
"title": "Boundary Element Analysis of Punch Problem Based on Complementary Theory",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2015/7644a257/12OmNznkJTI",
"parentPublication": {
"id": "proceedings/icicta/2015/7644/0",
"title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccad/2007/1381/0/04397236",
"title": "Impedance extraction for 3-D structures with multiple dielectrics using preconditioned boundary element method",
"doi": null,
"abstractUrl": "/proceedings-article/iccad/2007/04397236/12OmNzsJ7Fk",
"parentPublication": {
"id": "proceedings/iccad/2007/1381/0",
"title": "2007 IEEE/ACM International Conference on Computer Aided Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyRg4lV",
"title": "Quantum, Nano, and Micro Technologies, First International Conference on",
"acronym": "icqnm",
"groupId": "1001549",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrNh0DY",
"doi": "10.1109/ICQNM.2010.25",
"title": "Combination of Boundary Singularity and Direct Simulation Monte Carlo Methods for Nano-scale Flows",
"normalizedTitle": "Combination of Boundary Singularity and Direct Simulation Monte Carlo Methods for Nano-scale Flows",
"abstract": "A novel hybrid method combining the continuum approach based boundary singularity method (BSM) and the molecular approach based direct simulation Monte Carlo (DSMC) is developed and then used to study viscous fibrous filtration flows in the transition flow regime, . This approach may be useful for modeling of detection and signaling in micro-fluidic sensors. The DSMC is applied to an annular region enclosing the solid fiber and the BSM is employed to the entire flow domain. The parameters used in the DSMC and the coupling procedure, such as the number of simulated DSMC particles, the cell size and the size of the coupling zone are determined. It is observed that in the partial-slip flow regime the results obtained by the hybrid BSM-DSMC method match the ones from the BSM combined with the heuristic partial-slip boundary conditions. For higher Knudsen numbers, the difference in pressure drop and velocity is significant. The developed hybrid method is then parallelized by using MPI and extended for multi-fiber filtration flows. The multi-fiber filter flows considered are in the partial-slip and transition regimes. The proposed combined continuum and molecular methodology can incorporate surface chemical reactions and the electromagnetic forces in the DSMC procedure for Knudsen layer.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A novel hybrid method combining the continuum approach based boundary singularity method (BSM) and the molecular approach based direct simulation Monte Carlo (DSMC) is developed and then used to study viscous fibrous filtration flows in the transition flow regime, . This approach may be useful for modeling of detection and signaling in micro-fluidic sensors. The DSMC is applied to an annular region enclosing the solid fiber and the BSM is employed to the entire flow domain. The parameters used in the DSMC and the coupling procedure, such as the number of simulated DSMC particles, the cell size and the size of the coupling zone are determined. It is observed that in the partial-slip flow regime the results obtained by the hybrid BSM-DSMC method match the ones from the BSM combined with the heuristic partial-slip boundary conditions. For higher Knudsen numbers, the difference in pressure drop and velocity is significant. The developed hybrid method is then parallelized by using MPI and extended for multi-fiber filtration flows. The multi-fiber filter flows considered are in the partial-slip and transition regimes. The proposed combined continuum and molecular methodology can incorporate surface chemical reactions and the electromagnetic forces in the DSMC procedure for Knudsen layer.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A novel hybrid method combining the continuum approach based boundary singularity method (BSM) and the molecular approach based direct simulation Monte Carlo (DSMC) is developed and then used to study viscous fibrous filtration flows in the transition flow regime, . This approach may be useful for modeling of detection and signaling in micro-fluidic sensors. The DSMC is applied to an annular region enclosing the solid fiber and the BSM is employed to the entire flow domain. The parameters used in the DSMC and the coupling procedure, such as the number of simulated DSMC particles, the cell size and the size of the coupling zone are determined. It is observed that in the partial-slip flow regime the results obtained by the hybrid BSM-DSMC method match the ones from the BSM combined with the heuristic partial-slip boundary conditions. For higher Knudsen numbers, the difference in pressure drop and velocity is significant. The developed hybrid method is then parallelized by using MPI and extended for multi-fiber filtration flows. The multi-fiber filter flows considered are in the partial-slip and transition regimes. The proposed combined continuum and molecular methodology can incorporate surface chemical reactions and the electromagnetic forces in the DSMC procedure for Knudsen layer.",
"fno": "3952a096",
"keywords": [
"Stokes Equations Partial Slip Boundary Conditions Molecular Methods DSMC Boundary Elements Nano Fluids"
],
"authors": [
{
"affiliation": null,
"fullName": "Shunliu Zhao",
"givenName": "Shunliu",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Alex Povitsky",
"givenName": "Alex",
"surname": "Povitsky",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icqnm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-02-01T00:00:00",
"pubType": "proceedings",
"pages": "96-101",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-3952-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3952a092",
"articleId": "12OmNASILV7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3952a102",
"articleId": "12OmNvxKu28",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icsps/2009/3654/0/3654a866",
"title": "Analytic Approximate Solution for Two-Dimensional Steady Slip Flow in Microchannels by Variational Iteration Method",
"doi": null,
"abstractUrl": "/proceedings-article/icsps/2009/3654a866/12OmNAkEU2v",
"parentPublication": {
"id": "proceedings/icsps/2009/3654/0",
"title": "2009 International Conference on Signal Processing Systems (ICSPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2014/6239/0/6239a036",
"title": "Pattern-Aware Sequential Monte Carlo Detection for Generalized Space-Time Shift Keying",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2014/6239a036/12OmNCgJe9u",
"parentPublication": {
"id": "proceedings/cit/2014/6239/0",
"title": "2014 IEEE International Conference on Computer and Information Technology (CIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdc/2000/6638/5/00914612",
"title": "Shape design of channel flows for steady, incompressible flows",
"doi": null,
"abstractUrl": "/proceedings-article/cdc/2000/00914612/12OmNrnJ6Wk",
"parentPublication": {
"id": "proceedings/cdc/2000/6638/5",
"title": "Proceedings of the 39th IEEE Conference on Decision and Control",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1994/6627/0/00346337",
"title": "Introducing alpha shapes for the analysis of path integral Monte Carlo results",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1994/00346337/12OmNscOUa6",
"parentPublication": {
"id": "proceedings/visual/1994/6627/0",
"title": "Proceedings Visualization '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/shpcc/1994/5680/0/00296641",
"title": "Adaptive runtime support for direct simulation Monte Carlo methods on distributed memory architectures",
"doi": null,
"abstractUrl": "/proceedings-article/shpcc/1994/00296641/12OmNxXCGJt",
"parentPublication": {
"id": "proceedings/shpcc/1994/5680/0",
"title": "Proceedings of IEEE Scalable High Performance Computing Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmens/2009/3938/0/3938a213",
"title": "Computational Experience with Nano-material Science Quantum Monte Carlo Modeling on BlueGene/L",
"doi": null,
"abstractUrl": "/proceedings-article/icmens/2009/3938a213/12OmNyPQ4Ow",
"parentPublication": {
"id": "proceedings/icmens/2009/3938/0",
"title": "MEMS, NANO, and Smart Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2006/2517/0/25170353",
"title": "A Monte Carlo-Based Fiber Tracking Algorithm using Diffusion Tensor MRI",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2006/25170353/12OmNz4SOom",
"parentPublication": {
"id": "proceedings/cbms/2006/2517/0",
"title": "19th IEEE Symposium on Computer-Based Medical Systems (CBMS'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cimsim/2010/4262/0/4262a565",
"title": "No Slip and Free Slip Boundary Conditions for Liquid Flow in Obstructed Straight Microchannel",
"doi": null,
"abstractUrl": "/proceedings-article/cimsim/2010/4262a565/12OmNzSh13C",
"parentPublication": {
"id": "proceedings/cimsim/2010/4262/0",
"title": "Computational Intelligence, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmens/2004/2189/0/01508970",
"title": "Liquids: The Holy Grail of Microfluidics Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icmens/2004/01508970/1htBgZxf2XS",
"parentPublication": {
"id": "proceedings/icmens/2004/2189/0",
"title": "International Conference on MEMS, NANO and Smart Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKisM",
"title": "2018 IEEE 25th International Conference on High Performance Computing Workshops (HiPCW)",
"acronym": "hipcw",
"groupId": "1811646",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WXIkIl",
"doi": "10.1109/HiPCW.2018.8634138",
"title": "Acceleration of a 3D Immersed Boundary Solver Using OpenACC",
"normalizedTitle": "Acceleration of a 3D Immersed Boundary Solver Using OpenACC",
"abstract": "Immersed-boundary methods (IBM) have been constantly gaining popularity and are increasingly expanding to new areas of applications in computational mechanics since last three decades due to the potentials of their application in modeling complex multiphysics phenomena which involves flow over complex and moving boundaries. The specific advantages of an immersed boundary method are due to its accuracy and simplicity. As this method uses a fixed structured Cartesian mesh, the complex grid generation processes can be fully avoided whereas the complex/moving boundary is described using another surface mesh. The computational overheads in an immersed boundary implementation can be very high due to expensive search and interpolation steps through which the effects of the boundary conditions on the surface mesh are translated to the fixed Cartesian volume mesh. Therefore, computationally efficient numerical implementation of an IBM solver is of extreme importance to researchers. This paper presents an accelerated discrete finite difference based immersed boundary (IB) solver that is used to study the external flow behavior around complex geometries. The flow is assumed to be incompressible. The immersed boundary solver is parallelized using OpenACC for quick acceleration with minimal code changes and to ensure performance portability across both GPUs and multicore CPUs. Our experimental results indicate that the OpenACC-based IB solver run on a NVIDIA Tesla P100 GPU is 21x faster than the sequential legacy solver and is 3.3x faster than the OpenACC-based IB solver run on a dual socket Intel Xeon Gold 6148, 20 core CPU. The recirculation lengths obtained for Reynolds numbers of 20 and 40 and the Strouhal number for Reynolds number 100, for a standard flow visualization problem over a fixed cylinder, are in accordance with the reported data in available literature, thereby validating the accuracy of the parallel solver. We also analyze the performance of the accelerated solver on different GPU architectures: Kepler, Pascal and Volta.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Immersed-boundary methods (IBM) have been constantly gaining popularity and are increasingly expanding to new areas of applications in computational mechanics since last three decades due to the potentials of their application in modeling complex multiphysics phenomena which involves flow over complex and moving boundaries. The specific advantages of an immersed boundary method are due to its accuracy and simplicity. As this method uses a fixed structured Cartesian mesh, the complex grid generation processes can be fully avoided whereas the complex/moving boundary is described using another surface mesh. The computational overheads in an immersed boundary implementation can be very high due to expensive search and interpolation steps through which the effects of the boundary conditions on the surface mesh are translated to the fixed Cartesian volume mesh. Therefore, computationally efficient numerical implementation of an IBM solver is of extreme importance to researchers. This paper presents an accelerated discrete finite difference based immersed boundary (IB) solver that is used to study the external flow behavior around complex geometries. The flow is assumed to be incompressible. The immersed boundary solver is parallelized using OpenACC for quick acceleration with minimal code changes and to ensure performance portability across both GPUs and multicore CPUs. Our experimental results indicate that the OpenACC-based IB solver run on a NVIDIA Tesla P100 GPU is 21x faster than the sequential legacy solver and is 3.3x faster than the OpenACC-based IB solver run on a dual socket Intel Xeon Gold 6148, 20 core CPU. The recirculation lengths obtained for Reynolds numbers of 20 and 40 and the Strouhal number for Reynolds number 100, for a standard flow visualization problem over a fixed cylinder, are in accordance with the reported data in available literature, thereby validating the accuracy of the parallel solver. We also analyze the performance of the accelerated solver on different GPU architectures: Kepler, Pascal and Volta.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Immersed-boundary methods (IBM) have been constantly gaining popularity and are increasingly expanding to new areas of applications in computational mechanics since last three decades due to the potentials of their application in modeling complex multiphysics phenomena which involves flow over complex and moving boundaries. The specific advantages of an immersed boundary method are due to its accuracy and simplicity. As this method uses a fixed structured Cartesian mesh, the complex grid generation processes can be fully avoided whereas the complex/moving boundary is described using another surface mesh. The computational overheads in an immersed boundary implementation can be very high due to expensive search and interpolation steps through which the effects of the boundary conditions on the surface mesh are translated to the fixed Cartesian volume mesh. Therefore, computationally efficient numerical implementation of an IBM solver is of extreme importance to researchers. This paper presents an accelerated discrete finite difference based immersed boundary (IB) solver that is used to study the external flow behavior around complex geometries. The flow is assumed to be incompressible. The immersed boundary solver is parallelized using OpenACC for quick acceleration with minimal code changes and to ensure performance portability across both GPUs and multicore CPUs. Our experimental results indicate that the OpenACC-based IB solver run on a NVIDIA Tesla P100 GPU is 21x faster than the sequential legacy solver and is 3.3x faster than the OpenACC-based IB solver run on a dual socket Intel Xeon Gold 6148, 20 core CPU. The recirculation lengths obtained for Reynolds numbers of 20 and 40 and the Strouhal number for Reynolds number 100, for a standard flow visualization problem over a fixed cylinder, are in accordance with the reported data in available literature, thereby validating the accuracy of the parallel solver. We also analyze the performance of the accelerated solver on different GPU architectures: Kepler, Pascal and Volta.",
"fno": "08634138",
"keywords": [
"Computational Fluid Dynamics",
"Coprocessors",
"External Flows",
"Finite Difference Methods",
"Flow Simulation",
"Flow Visualisation",
"Graphics Processing Units",
"Interpolation",
"Mesh Generation",
"Multiprocessing Systems",
"Parallel Architectures",
"Accelerated Solver",
"Immersed Boundary Solver",
"Immersed Boundary Methods",
"Computational Mechanics",
"Complex Multiphysics Phenomena",
"Immersed Boundary Method",
"Fixed Structured Cartesian Mesh",
"Complex Grid Generation",
"Surface Mesh",
"Computational Overheads",
"Immersed Boundary Implementation",
"Boundary Conditions",
"Fixed Cartesian Volume Mesh",
"Computationally Efficient Numerical Implementation",
"IBM Solver",
"Accelerated Discrete Finite Difference",
"Complex Geometries",
"Open ACC Based IB",
"Sequential Legacy Solver",
"Parallel Solver",
"Graphics Processing Units",
"Acceleration",
"Mathematical Model",
"Boundary Conditions",
"Geometry",
"Multicore Processing",
"Computational Fluid Dynamics",
"Immersed Boundary Method",
"Multi Physics Numerical Solver",
"Finite Difference Method",
"High Performance Computing",
"GPGPU Computing"
],
"authors": [
{
"affiliation": "Department of Aerospace Engineering",
"fullName": "Apurva Raj",
"givenName": "Apurva",
"surname": "Raj",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechanical Engineering, Indian Institute of Technology Kharagpur, Kharagpur, 721302, India",
"fullName": "Somnath Roy",
"givenName": "Somnath",
"surname": "Roy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NVIDIA Graphics Pvt. Ltd., C-1 “Jacaranda”, Manyata Embassy Business Park, Bangalore, 560045, India",
"fullName": "Nagavijayalakshmi Vydyanathar",
"givenName": "Nagavijayalakshmi",
"surname": "Vydyanathar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NVIDIA Graphics Pvt. Ltd., C-1 “Jacaranda”, Manyata Embassy Business Park, Bangalore, 560045, India",
"fullName": "Bharatkumar Sharma",
"givenName": "Bharatkumar",
"surname": "Sharma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hipcw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-12-01T00:00:00",
"pubType": "proceedings",
"pages": "65-73",
"year": "2018",
"issn": null,
"isbn": "978-1-7281-0114-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08634225",
"articleId": "17D45WnnFUG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08634217",
"articleId": "17D45WHONq4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ipdpsw/2017/3408/0/07965108",
"title": "Implementing the OpenACC Data Model",
"doi": null,
"abstractUrl": "/proceedings-article/ipdpsw/2017/07965108/12OmNAFWORF",
"parentPublication": {
"id": "proceedings/ipdpsw/2017/3408/0",
"title": "2017 IEEE International Parallel and Distributed Processing Symposium: Workshops (IPDPSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2010/6812/1/05533150",
"title": "Dynamic Analysis of Tall-Pier Aqueduct Using ODE Solver",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2010/05533150/12OmNAS9zvg",
"parentPublication": {
"id": "proceedings/cso/2010/6812/1",
"title": "2010 Third International Joint Conference on Computational Science and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdp/2016/8776/0/8776a468",
"title": "An OpenACC Optimizer for Accelerating Histogram Computation on a GPU",
"doi": null,
"abstractUrl": "/proceedings-article/pdp/2016/8776a468/12OmNAtK4mp",
"parentPublication": {
"id": "proceedings/pdp/2016/8776/0",
"title": "2016 24th Euromicro International Conference on Parallel, Distributed, and Network-Based Processing (PDP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdcat/2016/5081/0/07943324",
"title": "Performance and Portability Studies with OpenACC Accelerated Version of GTC-P",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943324/12OmNBW0vzO",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waccpd/2016/6152/0/07836576",
"title": "Acceleration of Element-by-Element Kernel in Unstructured Implicit Low-Order Finite-Element Earthquake Simulation Using OpenACC on Pascal GPUs",
"doi": null,
"abstractUrl": "/proceedings-article/waccpd/2016/07836576/12OmNqH9hhQ",
"parentPublication": {
"id": "proceedings/waccpd/2016/6152/0",
"title": "2016 Third Workshop on Accelerator Programming using Directives (WACCPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waccpd/2014/6753/0/6753a047",
"title": "Accelerating a C++ CFD Code with OpenACC",
"doi": null,
"abstractUrl": "/proceedings-article/waccpd/2014/6753a047/12OmNx5Yv6E",
"parentPublication": {
"id": "proceedings/waccpd/2014/6753/0",
"title": "2014 First Workshop on Accelerator Programming using Directives (WACCPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waccpd/2014/6753/0/6753a019",
"title": "Achieving Portability and Performance through OpenACC",
"doi": null,
"abstractUrl": "/proceedings-article/waccpd/2014/6753a019/12OmNz5JBSK",
"parentPublication": {
"id": "proceedings/waccpd/2014/6753/0",
"title": "2014 First Workshop on Accelerator Programming using Directives (WACCPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/06171181",
"title": "A Multigrid Fluid Pressure Solver Handling Separating Solid Boundary Conditions",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/06171181/13rRUxlgxTi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2022/01/09676479",
"title": "Portable Acceleration of Materials Modeling Software: CASTEP, GPUs, and OpenACC",
"doi": null,
"abstractUrl": "/magazine/cs/2022/01/09676479/1A3doEbcXhC",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2019/5268/0/526800a176",
"title": "Parallelization of Direct-Forcing Immersed Boundary Method Using OpenACC",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2019/526800a176/1gysEBR0e4w",
"parentPublication": {
"id": "proceedings/candarw/2019/5268/0",
"title": "2019 Seventh International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNs0C9QC",
"title": "2016 International Symposium on Computer, Consumer and Control (IS3C)",
"acronym": "is3c",
"groupId": "1801670",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBJw9Uc",
"doi": "10.1109/IS3C.2016.64",
"title": "Comparison of Biased and Unbiased Sampling Algorithms Using Graph Metrics",
"normalizedTitle": "Comparison of Biased and Unbiased Sampling Algorithms Using Graph Metrics",
"abstract": "Degree distribution, hierarchy, clustering and small-world are typical graph features to measure the structure of complex networks. This paper uses these features to compare and evaluate the performances of biased and unbiased sampling algorithms on two types of scale-free networks. The numerical analysis verifies that the biased sampling performs better than the unbiased sampling on networks in which high-degree nodes are loosely interconnected. Our work is useful for the in-depth understanding of the biased and unbiased samplings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Degree distribution, hierarchy, clustering and small-world are typical graph features to measure the structure of complex networks. This paper uses these features to compare and evaluate the performances of biased and unbiased sampling algorithms on two types of scale-free networks. The numerical analysis verifies that the biased sampling performs better than the unbiased sampling on networks in which high-degree nodes are loosely interconnected. Our work is useful for the in-depth understanding of the biased and unbiased samplings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Degree distribution, hierarchy, clustering and small-world are typical graph features to measure the structure of complex networks. This paper uses these features to compare and evaluate the performances of biased and unbiased sampling algorithms on two types of scale-free networks. The numerical analysis verifies that the biased sampling performs better than the unbiased sampling on networks in which high-degree nodes are loosely interconnected. Our work is useful for the in-depth understanding of the biased and unbiased samplings.",
"fno": "3071a212",
"keywords": [
"Measurement",
"Clustering Algorithms",
"Complex Networks",
"Social Network Services",
"Internet Topology",
"Approximation Algorithms",
"Atmospheric Modeling",
"Performance Evaluation",
"Biased Sampling",
"Unbiased Sampling",
"Graph Metric",
"Scale Free Network"
],
"authors": [
{
"affiliation": null,
"fullName": "Bo Jiao",
"givenName": "Bo",
"surname": "Jiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yuan-Ping Nie",
"givenName": "Yuan-Ping",
"surname": "Nie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rong-Hua Guo",
"givenName": "Rong-Hua",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yi-Can Jin",
"givenName": "Yi-Can",
"surname": "Jin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xun-Long Pang",
"givenName": "Xun-Long",
"surname": "Pang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhe Han",
"givenName": "Zhe",
"surname": "Han",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ying Zhou",
"givenName": "Ying",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "is3c",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-07-01T00:00:00",
"pubType": "proceedings",
"pages": "212-215",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3071-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3071a208",
"articleId": "12OmNzX6cfG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3071a216",
"articleId": "12OmNxwENiZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/focs/1984/0591/0/0715944",
"title": "Independent Unbiased Coin Flips From A Correlated Biased Source: A Finite State Markov Chain",
"doi": null,
"abstractUrl": "/proceedings-article/focs/1984/0715944/12OmNC8dge7",
"parentPublication": {
"id": "proceedings/focs/1984/0591/0",
"title": "25th Annual Symposium onFoundations of Computer Science, 1984.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cyberc/2011/4557/0/4557a357",
"title": "Unbiased Sampling of Bipartite Graph",
"doi": null,
"abstractUrl": "/proceedings-article/cyberc/2011/4557a357/12OmNy5zsnM",
"parentPublication": {
"id": "proceedings/cyberc/2011/4557/0",
"title": "2011 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2017/3835/0/3835a455",
"title": "Edge-Based Wedge Sampling to Estimate Triangle Counts in Very Large Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2017/3835a455/12OmNzBwGBP",
"parentPublication": {
"id": "proceedings/icdm/2017/3835/0",
"title": "2017 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cyberc/2016/5154/0/07864275",
"title": "Performance of Two Normalized Laplacian Spectral Features on Sampling Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/cyberc/2016/07864275/12OmNzlUKq3",
"parentPublication": {
"id": "proceedings/cyberc/2016/5154/0",
"title": "2016 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/nt/2009/02/04637905",
"title": "On unbiased sampling for unstructured peer-to-peer networks",
"doi": null,
"abstractUrl": "/journal/nt/2009/02/04637905/13rRUEgs2IY",
"parentPublication": {
"id": "trans/nt",
"title": "IEEE/ACM Transactions on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06811174",
"title": "Unbiased Sampling and Meshing of Isosurfaces",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06811174/13rRUxNmPDU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2017/05/07748517",
"title": "An Unbiased MCMC FPGA-Based Accelerator in the Land of Custom Precision Arithmetic",
"doi": null,
"abstractUrl": "/journal/tc/2017/05/07748517/13rRUypp574",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2018/5035/0/08621872",
"title": "BiasedWalk: Biased Sampling for Representation Learning on Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2018/08621872/17D45XoXP5l",
"parentPublication": {
"id": "proceedings/big-data/2018/5035/0",
"title": "2018 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d713",
"title": "Unbiased Scene Graph Generation From Biased Training",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d713/1m3o31iArJe",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2020/8009/0/800900a072",
"title": "STULL: Unbiased Online Sampling for Visual Exploration of Large Spatiotemporal Data",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2020/800900a072/1q7jwDf9eTK",
"parentPublication": {
"id": "proceedings/vast/2020/8009/0",
"title": "2020 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNB8Cj8T",
"title": "2009 11th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing",
"acronym": "synasc",
"groupId": "1001577",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvDqsUX",
"doi": "10.1109/SYNASC.2009.56",
"title": "Monte Carlo Variance Reduction. Importance Sampling Techniques",
"normalizedTitle": "Monte Carlo Variance Reduction. Importance Sampling Techniques",
"abstract": "In this paper we investigate some Importance Sampling strategies and we apply them for the first time to the pricing of the spread options. We compare the Least Squares method to the f-divergence method in order to choose the importance sampling functions. Our numerical results reveals that the use of the divergences is frequently less computationally and time costly.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we investigate some Importance Sampling strategies and we apply them for the first time to the pricing of the spread options. We compare the Least Squares method to the f-divergence method in order to choose the importance sampling functions. Our numerical results reveals that the use of the divergences is frequently less computationally and time costly.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we investigate some Importance Sampling strategies and we apply them for the first time to the pricing of the spread options. We compare the Least Squares method to the f-divergence method in order to choose the importance sampling functions. Our numerical results reveals that the use of the divergences is frequently less computationally and time costly.",
"fno": "3964a137",
"keywords": [
"Monte Carlo Method",
"Importance Sampling",
"Divergence"
],
"authors": [
{
"affiliation": null,
"fullName": "Olariu Emanuel Florentin",
"givenName": "Olariu Emanuel",
"surname": "Florentin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "synasc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-09-01T00:00:00",
"pubType": "proceedings",
"pages": "137-141",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3964-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3964a130",
"articleId": "12OmNwKGApM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3964a142",
"articleId": "12OmNy2Jt5L",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2012/4814/0/4814a268",
"title": "Precomputed Radiance Transfer as a Variance Reduction Technique -- A Small Case Study",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2012/4814a268/12OmNButq4G",
"parentPublication": {
"id": "proceedings/cw/2012/4814/0",
"title": "2012 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2003/2030/0/20300059",
"title": "Monte Carlo Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300059/12OmNCdBDFe",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2006/0693/0/04061554",
"title": "Filter Importance Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2006/04061554/12OmNvDI3SC",
"parentPublication": {
"id": "proceedings/rt/2006/0693/0",
"title": "IEEE Symposium on Interactive Ray Tracing 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/2004/8781/0/87810033",
"title": "Interactive Transfer Function Control for Monte Carlo Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2004/87810033/12OmNwGqBoE",
"parentPublication": {
"id": "proceedings/vv/2004/8781/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2009/3762/0/3762b301",
"title": "Volume Visualization with Grid-Independent Adaptive Monte Carlo Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2009/3762b301/12OmNxG1yS8",
"parentPublication": {
"id": "proceedings/iih-msp/2009/3762/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/infcom/2001/7016/1/00916726",
"title": "Efficient importance sampling for Monte Carlo simulation of multicast networks",
"doi": null,
"abstractUrl": "/proceedings-article/infcom/2001/00916726/12OmNxRF786",
"parentPublication": {
"id": "proceedings/infcom/2001/7016/1",
"title": "Proceedings IEEE INFOCOM 2001. Conference on Computer Communications. Twentieth Annual Joint Conference of the IEEE Computer and Communications Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wsc/2002/7614/1/01172874",
"title": "Adaptive Monte Carlo methods for rare event simulations",
"doi": null,
"abstractUrl": "/proceedings-article/wsc/2002/01172874/12OmNyRg4k7",
"parentPublication": {
"id": "proceedings/wsc/2002/7614/1",
"title": "Winter Simulation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2005/9313/0/01577198",
"title": "Adaptive sampling with Renyi entropy in Monte Carlo path tracing",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2005/01577198/12OmNzEVS1q",
"parentPublication": {
"id": "proceedings/isspit/2005/9313/0",
"title": "2005 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bife/2009/3705/0/3705a386",
"title": "Nonlinear VaR Model of FX Options Portfolio Based on Importance Sampling Technique",
"doi": null,
"abstractUrl": "/proceedings-article/bife/2009/3705a386/12OmNzcxZnP",
"parentPublication": {
"id": "proceedings/bife/2009/3705/0",
"title": "2009 International Conference on Business Intelligence and Financial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413313",
"title": "Low-Cost Lipschitz-Independent Adaptive Importance Sampling of Stochastic Gradients",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413313/1tmimgMil7a",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwCJP0g",
"title": "Proceedings First International Symposium on Uncertainty Modeling and Analysis",
"acronym": "isuma",
"groupId": "1000771",
"volume": "0",
"displayVolume": "0",
"year": "1990",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwbukis",
"doi": "10.1109/ISUMA.1990.151221",
"title": "Structural reliability evaluation by importance sampling and Kalman filter",
"normalizedTitle": "Structural reliability evaluation by importance sampling and Kalman filter",
"abstract": "A simulation method is proposed in which data are effectively sampled from an importance sampling density function and the probabilities of limit state functions are adaptively updated by using Kalman filter. The important constants for the procedure proposed are examined using a highly nonlinear problem.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "A simulation method is proposed in which data are effectively sampled from an importance sampling density function and the probabilities of limit state functions are adaptively updated by using Kalman filter. The important constants for the procedure proposed are examined using a highly nonlinear problem.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A simulation method is proposed in which data are effectively sampled from an importance sampling density function and the probabilities of limit state functions are adaptively updated by using Kalman filter. The important constants for the procedure proposed are examined using a highly nonlinear problem.",
"fno": "00151221",
"keywords": [
"Kalman Filters",
"Reliability Theory",
"Structural Reliability Evaluation",
"Limit State Function Probabilities",
"Kalman Filter",
"Importance Sampling Density Function",
"Monte Carlo Methods",
"Sampling Methods",
"Density Functional Theory",
"Probability Distribution",
"Probability Density Function",
"Random Variables",
"Reliability",
"State Estimation",
"Shape",
"Kernel"
],
"authors": [
{
"affiliation": "Musashi Inst. of Technol., Tokyo, Japan",
"fullName": "M. Hoshiya",
"givenName": "M.",
"surname": "Hoshiya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "M. Fujita",
"givenName": "M.",
"surname": "Fujita",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "H. Kuroda",
"givenName": "H.",
"surname": "Kuroda",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isuma",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1990-01-01T00:00:00",
"pubType": "proceedings",
"pages": "45,46,47,48",
"year": "1990",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00151220",
"articleId": "12OmNC8MsKz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00151222",
"articleId": "12OmNqNosbQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccad/2010/8194/0/05654259",
"title": "Sequential importance sampling for low-probability and high-dimensional SRAM yield analysis",
"doi": null,
"abstractUrl": "/proceedings-article/iccad/2010/05654259/12OmNro0HXm",
"parentPublication": {
"id": "proceedings/iccad/2010/8194/0",
"title": "2010 IEEE/ACM International Conference on Computer-Aided Design (ICCAD 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/infcom/2001/7016/1/00916726",
"title": "Efficient importance sampling for Monte Carlo simulation of multicast networks",
"doi": null,
"abstractUrl": "/proceedings-article/infcom/2001/00916726/12OmNxRF786",
"parentPublication": {
"id": "proceedings/infcom/2001/7016/1",
"title": "Proceedings IEEE INFOCOM 2001. Conference on Computer Communications. Twentieth Annual Joint Conference of the IEEE Computer and Communications Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761153",
"title": "SVD based Kalman particle filter for robust visual tracking",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761153/12OmNyRPgKl",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2008/2242/0/04587639",
"title": "An importance sampling approach to learning structural representations of shape",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2008/04587639/12OmNz61d0x",
"parentPublication": {
"id": "proceedings/cvpr/2008/2242/0",
"title": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761408",
"title": "Edge-preserving unscented Kalman filter for speckle reduction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761408/12OmNzFv4hS",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2015/8493/0/8493a607",
"title": "Stream Clustering: Efficient Kernel-Based Approximation Using Importance Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2015/8493a607/12OmNzmLxFl",
"parentPublication": {
"id": "proceedings/icdmw/2015/8493/0",
"title": "2015 IEEE International Conference on Data Mining Workshop (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2017/03/07564452",
"title": "High-Dimensional and Multiple-Failure-Region Importance Sampling for SRAM Yield Analysis",
"doi": null,
"abstractUrl": "/journal/si/2017/03/07564452/13rRUwjGoJf",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2008/02/mmu2008020052",
"title": "Importance Sampling-Based Unscented Kalman Filter for Film-Grain Noise Removal",
"doi": null,
"abstractUrl": "/magazine/mu/2008/02/mmu2008020052/13rRUxBa58o",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/nt/1993/03/00234852",
"title": "Statistical optimization of dynamic importance sampling parameters for efficient simulation of communication networks",
"doi": null,
"abstractUrl": "/journal/nt/1993/03/00234852/13rRUyY291G",
"parentPublication": {
"id": "trans/nt",
"title": "IEEE/ACM Transactions on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413313",
"title": "Low-Cost Lipschitz-Independent Adaptive Importance Sampling of Stochastic Gradients",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413313/1tmimgMil7a",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrJAe1a",
"title": "2016 IEEE 57th Annual Symposium on Foundations of Computer Science (FOCS)",
"acronym": "focs",
"groupId": "1000292",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzh5z3r",
"doi": "10.1109/FOCS.2016.96",
"title": "A Fast and Simple Unbiased Estimator for Network (Un)reliability",
"normalizedTitle": "A Fast and Simple Unbiased Estimator for Network (Un)reliability",
"abstract": "The following is an unbiased estimator for the disconnectionprobability of an n-vertex graph with min-cut c whenevery edge fails independently with probability p: (i) contractevery edge independently with probability 1n2=c, (ii) compute(by brute force) the disconnection probability of the resulting tinygraph if each edge fails with probability n2=cp. We give a short,simple, self-contained proof that the estimator can be computedin linear time and has relative variance O(n2). Combining thesetwo facts with a standard sparsification argument yields anO(n3 log n)-time algorithm for estimating the (un)reliability ofa network. We also show how the technique can be used tocreate unbiased samples of disconnected networks",
"abstracts": [
{
"abstractType": "Regular",
"content": "The following is an unbiased estimator for the disconnectionprobability of an n-vertex graph with min-cut c whenevery edge fails independently with probability p: (i) contractevery edge independently with probability 1n2=c, (ii) compute(by brute force) the disconnection probability of the resulting tinygraph if each edge fails with probability n2=cp. We give a short,simple, self-contained proof that the estimator can be computedin linear time and has relative variance O(n2). Combining thesetwo facts with a standard sparsification argument yields anO(n3 log n)-time algorithm for estimating the (un)reliability ofa network. We also show how the technique can be used tocreate unbiased samples of disconnected networks",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The following is an unbiased estimator for the disconnectionprobability of an n-vertex graph with min-cut c whenevery edge fails independently with probability p: (i) contractevery edge independently with probability 1n2=c, (ii) compute(by brute force) the disconnection probability of the resulting tinygraph if each edge fails with probability n2=cp. We give a short,simple, self-contained proof that the estimator can be computedin linear time and has relative variance O(n2). Combining thesetwo facts with a standard sparsification argument yields anO(n3 log n)-time algorithm for estimating the (un)reliability ofa network. We also show how the technique can be used tocreate unbiased samples of disconnected networks",
"fno": "3933a635",
"keywords": [
"Reliability",
"Approximation Algorithms",
"Monte Carlo Methods",
"Runtime",
"Algorithm Design And Analysis",
"Computer Network Reliability",
"Contracts"
],
"authors": [
{
"affiliation": null,
"fullName": "David R. Karger",
"givenName": "David R.",
"surname": "Karger",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "focs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-10-01T00:00:00",
"pubType": "proceedings",
"pages": "635-644",
"year": "2016",
"issn": "0272-5428",
"isbn": "978-1-5090-3933-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3933a625",
"articleId": "12OmNwDACdw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3933a645",
"articleId": "12OmNx4yvAb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2015/9504/0/9504a659",
"title": "Top-k Reliability Search on Uncertain Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2015/9504a659/12OmNx6xHkZ",
"parentPublication": {
"id": "proceedings/icdm/2015/9504/0",
"title": "2015 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icns/2008/3094/0/3094a122",
"title": "A Monte Carlo Method for Estimating the Extended All-Terminal Reliability",
"doi": null,
"abstractUrl": "/proceedings-article/icns/2008/3094a122/12OmNzwHv9W",
"parentPublication": {
"id": "proceedings/icns/2008/3094/0",
"title": "Fourth International Conference on Networking and Services (icns 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/focs/2017/3464/0/3464a755",
"title": "Faster (and Still Pretty Simple) Unbiased Estimators for Network (Un)reliability",
"doi": null,
"abstractUrl": "/proceedings-article/focs/2017/3464a755/12OmNzxgHyg",
"parentPublication": {
"id": "proceedings/focs/2017/3464/0",
"title": "2017 IEEE 58th Annual Symposium on Foundations of Computer Science (FOCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fskd/2009/3735/6/3735f277",
"title": "Fuzzy Travel Time Reliability with Uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2009/3735f277/12OmNzxyiE4",
"parentPublication": {
"id": "proceedings/fskd/2009/3735/6",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/nt/2011/06/05759108",
"title": "Reliability in Layered Networks with Random Link Failures",
"doi": null,
"abstractUrl": "/journal/nt/2011/06/05759108/13rRUwjGoIz",
"parentPublication": {
"id": "trans/nt",
"title": "IEEE/ACM Transactions on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06811174",
"title": "Unbiased Sampling and Meshing of Isosurfaces",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06811174/13rRUxNmPDU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2017/05/07748517",
"title": "An Unbiased MCMC FPGA-Based Accelerator in the Land of Custom Precision Arithmetic",
"doi": null,
"abstractUrl": "/journal/tc/2017/05/07748517/13rRUypp574",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsn/2022/1693/0/169300a307",
"title": "Exploiting monotonicity and symmetry for efficient simulation of highly dependable systems",
"doi": null,
"abstractUrl": "/proceedings-article/dsn/2022/169300a307/1Fixbyigc4U",
"parentPublication": {
"id": "proceedings/dsn/2022/1693/0",
"title": "2022 52nd Annual IEEE/IFIP International Conference on Dependable Systems and Networks (DSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cscloud-edgecom/2020/6550/0/09171000",
"title": "Assessing the Reliability of Hybrid Clouds with Monte Carlo Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/cscloud-edgecom/2020/09171000/1mqcy4LvffG",
"parentPublication": {
"id": "proceedings/cscloud-edgecom/2020/6550/0",
"title": "2020 7th IEEE International Conference on Cyber Security and Cloud Computing (CSCloud)/2020 6th IEEE International Conference on Edge Computing and Scalable Cloud (EdgeCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aemcse/2021/1596/0/159600a870",
"title": "Reliability Evaluation of phase-mission Systems Based on discrete-time Bayesian network",
"doi": null,
"abstractUrl": "/proceedings-article/aemcse/2021/159600a870/1wcdluAjFIc",
"parentPublication": {
"id": "proceedings/aemcse/2021/1596/0",
"title": "2021 4th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAndiq9",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"acronym": "pacificvis",
"groupId": "1001657",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs5rkPo",
"doi": "10.1109/PacificVis.2013.6596128",
"title": "Visual summaries for graph collections",
"normalizedTitle": "Visual summaries for graph collections",
"abstract": "Graphs can be used to represent a variety of information, from molecular structures to biological pathways to computational workflows. With a growing volume of data represented as graphs, the problem of understanding and analyzing the variations in a collection of graphs is of increasing importance. We present an algorithm to compute a single summary graph that efficiently encodes an entire collection of graphs by finding and merging similar nodes and edges. Instead of only merging nodes and edges that are exactly the same, we use domain-specific comparison functions to collapse similar nodes and edges which allows us to generate more compact representations of the collection. In addition, we have developed methods that allow users to interactively control the display of these summary graphs. These interactions include the ability to highlight individual graphs in the summary, control the succinctness of the summary, and explicitly define when specific nodes should or should not be merged. We show that our approach to generating and interacting with graph summaries leads to a better understanding of a graph collection by allowing users to more easily identify common substructures and key differences between graphs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Graphs can be used to represent a variety of information, from molecular structures to biological pathways to computational workflows. With a growing volume of data represented as graphs, the problem of understanding and analyzing the variations in a collection of graphs is of increasing importance. We present an algorithm to compute a single summary graph that efficiently encodes an entire collection of graphs by finding and merging similar nodes and edges. Instead of only merging nodes and edges that are exactly the same, we use domain-specific comparison functions to collapse similar nodes and edges which allows us to generate more compact representations of the collection. In addition, we have developed methods that allow users to interactively control the display of these summary graphs. These interactions include the ability to highlight individual graphs in the summary, control the succinctness of the summary, and explicitly define when specific nodes should or should not be merged. We show that our approach to generating and interacting with graph summaries leads to a better understanding of a graph collection by allowing users to more easily identify common substructures and key differences between graphs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Graphs can be used to represent a variety of information, from molecular structures to biological pathways to computational workflows. With a growing volume of data represented as graphs, the problem of understanding and analyzing the variations in a collection of graphs is of increasing importance. We present an algorithm to compute a single summary graph that efficiently encodes an entire collection of graphs by finding and merging similar nodes and edges. Instead of only merging nodes and edges that are exactly the same, we use domain-specific comparison functions to collapse similar nodes and edges which allows us to generate more compact representations of the collection. In addition, we have developed methods that allow users to interactively control the display of these summary graphs. These interactions include the ability to highlight individual graphs in the summary, control the succinctness of the summary, and explicitly define when specific nodes should or should not be merged. We show that our approach to generating and interacting with graph summaries leads to a better understanding of a graph collection by allowing users to more easily identify common substructures and key differences between graphs.",
"fno": "06596128",
"keywords": [
"Biology Computing",
"Data Analysis",
"Data Structures",
"Data Visualisation",
"Graph Theory",
"Visual Summaries",
"Graph Collections",
"Information Representation",
"Biological Pathways",
"Molecular Structures",
"Computational Workflows",
"Data Representation",
"Data Analysis",
"Single Summary Graph",
"Graph Collection",
"Domain Specific Comparison Functions",
"Summary Graph Display",
"Highlight Individual Graphs",
"Visualization",
"Layout",
"Compounds",
"Data Visualization",
"Matrices",
"Heuristic Algorithms",
"Color"
],
"authors": [
{
"affiliation": "NYU-Poly, USA",
"fullName": "David Koop",
"givenName": "David",
"surname": "Koop",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NYU-Poly, USA",
"fullName": "Juliana Freire",
"givenName": "Juliana",
"surname": "Freire",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NYU-Poly, USA",
"fullName": "Cláudio T. Silva",
"givenName": "Cláudio T.",
"surname": "Silva",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pacificvis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-02-01T00:00:00",
"pubType": "proceedings",
"pages": "57-64",
"year": "2013",
"issn": "2165-8765",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06596127",
"articleId": "12OmNAFnCxu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06596129",
"articleId": "12OmNxGSmhw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2014/4103/0/4103a234",
"title": "Semantic Blossom Graph: A New Approach for Visual Graph Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2014/4103a234/12OmNB1eJxU",
"parentPublication": {
"id": "proceedings/iv/2014/4103/0",
"title": "2014 18th International Conference on Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2016/0252/0/07739664",
"title": "Visual analysis of compound graphs",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2016/07739664/12OmNx7G5RX",
"parentPublication": {
"id": "proceedings/vlhcc/2016/0252/0",
"title": "2016 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vissoft/2014/6150/0/6150a045",
"title": "A Domain-Specific Language for Visualizing Software Dependencies as a Graph",
"doi": null,
"abstractUrl": "/proceedings-article/vissoft/2014/6150a045/12OmNzsrwgT",
"parentPublication": {
"id": "proceedings/vissoft/2014/6150/0",
"title": "2014 Second IEEE Working Conference on Software Visualization (VISSOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08057796",
"title": "Narrative Collage of Image Collections by Scene Graph Recombination",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08057796/13rRUwI5TR6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08031987",
"title": "Joint Graph Layouts for Visualizing Collections of Segmented Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08031987/13rRUxC0SWg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06812198",
"title": "Visual Adjacency Lists for Dynamic Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06812198/13rRUxcbnCs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/06/mcg2015060030",
"title": "Key-Node-Separated Graph Clustering and Layouts for Human Relationship Graph Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2015/06/mcg2015060030/13rRUyogGCG",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/02/08269823",
"title": "A Coloring Algorithm for Disambiguating Graph and Map Drawings",
"doi": null,
"abstractUrl": "/journal/tg/2019/02/08269823/17D45Xtvpay",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2021/3931/0/393100a061",
"title": "Stable Visual Summaries for Trajectory Collections",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2021/393100a061/1tTtq7bTYJi",
"parentPublication": {
"id": "proceedings/pacificvis/2021/3931/0",
"title": "2021 IEEE 14th Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552242",
"title": "Simultaneous Matrix Orderings for Graph Collections",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552242/1xic4WSUlKE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAndiq9",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"acronym": "pacificvis",
"groupId": "1001657",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNscfI0r",
"doi": "10.1109/PacificVis.2013.6596126",
"title": "Smooth bundling of large streaming and sequence graphs",
"normalizedTitle": "Smooth bundling of large streaming and sequence graphs",
"abstract": "Dynamic graphs are increasingly pervasive in modern information systems. However, understanding how a graph changes in time is difficult. We present here two techniques for simplified visualization of dynamic graphs using edge bundles. The first technique uses a recent image-based graph bundling method to create smoothly changing bundles from streaming graphs. The second technique incorporates additional edge-correspondence data and is thereby suited to visualize discrete graph sequences. We illustrate our methods with examples from real-world large dynamic graph datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dynamic graphs are increasingly pervasive in modern information systems. However, understanding how a graph changes in time is difficult. We present here two techniques for simplified visualization of dynamic graphs using edge bundles. The first technique uses a recent image-based graph bundling method to create smoothly changing bundles from streaming graphs. The second technique incorporates additional edge-correspondence data and is thereby suited to visualize discrete graph sequences. We illustrate our methods with examples from real-world large dynamic graph datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dynamic graphs are increasingly pervasive in modern information systems. However, understanding how a graph changes in time is difficult. We present here two techniques for simplified visualization of dynamic graphs using edge bundles. The first technique uses a recent image-based graph bundling method to create smoothly changing bundles from streaming graphs. The second technique incorporates additional edge-correspondence data and is thereby suited to visualize discrete graph sequences. We illustrate our methods with examples from real-world large dynamic graph datasets.",
"fno": "06596126",
"keywords": [
"Data Visualisation",
"Graph Theory",
"Mathematics Computing",
"Smooth Bundling",
"Large Streaming Graph",
"Sequence Graphs",
"Information Systems",
"Dynamic Graph Visualization",
"Edge Bundles",
"Image Based Graph Bundling Method",
"Edge Correspondence Data",
"Discrete Graph Sequence Visualization",
"Real World Large Dynamic Graph Datasets",
"Cloning",
"Visualization",
"Image Edge Detection",
"Animation",
"Layout",
"Image Color Analysis",
"Clutter",
"I 3 3 Picture Image Generation Line And Curve Generation"
],
"authors": [
{
"affiliation": "ENAC/University of Toulouse, France",
"fullName": "C. Hurter",
"givenName": "C.",
"surname": "Hurter",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Groningen, the Netherlands",
"fullName": "O. Ersoy",
"givenName": "O.",
"surname": "Ersoy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Groningen, the Netherlands",
"fullName": "A. Telea",
"givenName": "A.",
"surname": "Telea",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pacificvis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-02-01T00:00:00",
"pubType": "proceedings",
"pages": "41-48",
"year": "2013",
"issn": "2165-8765",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06596125",
"articleId": "12OmNs0kyAK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06596127",
"articleId": "12OmNAFnCxu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2015/6879/0/07156354",
"title": "Attribute-driven edge bundling for general graphs with applications in trail analysis",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156354/12OmNCaLEnG",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571244",
"title": "3D Edge Bundling for Geographical Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571244/12OmNqzu6LL",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2011/935/0/05742389",
"title": "Multilevel agglomerative edge bundling for visualizing large graphs",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2011/05742389/12OmNxj233Y",
"parentPublication": {
"id": "proceedings/pacificvis/2011/935/0",
"title": "2011 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2016/8942/0/8942a094",
"title": "On Edge Bundling and Node Layout for Mutually Connected Directed Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a094/12OmNzwZ6qg",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192715",
"title": "BiSet: Semantic Edge Bundling with Biclusters for Sensemaking",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192715/13rRUNvgz9T",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539373",
"title": "Towards Unambiguous Edge Bundling: Investigating Confluent Drawings for Network Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539373/13rRUwcS1CZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/08/06636295",
"title": "Bundled Visualization of DynamicGraph and Trail Data",
"doi": null,
"abstractUrl": "/journal/tg/2014/08/06636295/13rRUwd9CG3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/12/07374742",
"title": "CUBu: Universal Real-Time Bundling for Large Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2016/12/07374742/13rRUwgQpDx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122354",
"title": "Divided Edge Bundling for Directional Network Data",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122354/13rRUzpzeB1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/10/08423100",
"title": "The Effect of Edge Bundling and Seriation on Sensemaking of Biclusters in Bipartite Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2019/10/08423100/1d3e5UbWqis",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxE2mTD",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvxbhKS",
"doi": "10.1109/ICCVW.2015.140",
"title": "Video Summarization via Segments Summary Graphs",
"normalizedTitle": "Video Summarization via Segments Summary Graphs",
"abstract": "In this paper we propose a novel approach to video summarization that is based on the coherency analysis of segmented video frames as represented by region adjacency graphs. Similar segments across consecutive region adjacency graphs are matched and tracked using an efficient graph matching technique. Shot boundaries are detected based on a coherency score that measures the appearances and disappearances of tracked segments. As such, it is possible to form a compact representation of each detected shot-based on prevalent segmented regions and their relations - referred to as the 'segments summary graphs'. Furthermore, the segments summary graph is amenable for further semantic analysis and understanding of the scene. Experiments on benchmark datasets demonstrate that our method outperforms the state of the art summarization approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we propose a novel approach to video summarization that is based on the coherency analysis of segmented video frames as represented by region adjacency graphs. Similar segments across consecutive region adjacency graphs are matched and tracked using an efficient graph matching technique. Shot boundaries are detected based on a coherency score that measures the appearances and disappearances of tracked segments. As such, it is possible to form a compact representation of each detected shot-based on prevalent segmented regions and their relations - referred to as the 'segments summary graphs'. Furthermore, the segments summary graph is amenable for further semantic analysis and understanding of the scene. Experiments on benchmark datasets demonstrate that our method outperforms the state of the art summarization approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we propose a novel approach to video summarization that is based on the coherency analysis of segmented video frames as represented by region adjacency graphs. Similar segments across consecutive region adjacency graphs are matched and tracked using an efficient graph matching technique. Shot boundaries are detected based on a coherency score that measures the appearances and disappearances of tracked segments. As such, it is possible to form a compact representation of each detected shot-based on prevalent segmented regions and their relations - referred to as the 'segments summary graphs'. Furthermore, the segments summary graph is amenable for further semantic analysis and understanding of the scene. Experiments on benchmark datasets demonstrate that our method outperforms the state of the art summarization approaches.",
"fno": "5720b071",
"keywords": [
"Image Color Analysis",
"Image Segmentation",
"Semantics",
"Image Edge Detection",
"Motion Pictures",
"Encoding",
"Visualization"
],
"authors": [
{
"affiliation": null,
"fullName": "Mahmut Demir",
"givenName": "Mahmut",
"surname": "Demir",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "H. Isil Bozma",
"givenName": "H. Isil",
"surname": "Bozma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "1071-1077",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-9711-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5720b062",
"articleId": "12OmNAS9zoD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5720b078",
"articleId": "12OmNqzcvQL",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icsc/2018/4408/0/440801a071",
"title": "Automated Place Detection Based on Coherent Segments",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2018/440801a071/12OmNANkoj5",
"parentPublication": {
"id": "proceedings/icsc/2018/4408/0",
"title": "2018 IEEE 12th International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsc/2017/1600/0/1600a372",
"title": "A Movie Summary Generation System",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2017/1600a372/12OmNAnuToF",
"parentPublication": {
"id": "proceedings/dsc/2017/1600/0",
"title": "2017 IEEE Second International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoit/2016/3584/0/07966840",
"title": "Text Summarization Using Sentiment Analysis for DUC Data",
"doi": null,
"abstractUrl": "/proceedings-article/icoit/2016/07966840/12OmNBuL1fi",
"parentPublication": {
"id": "proceedings/icoit/2016/3584/0",
"title": "2016 International Conference on Information Technology (ICIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926698",
"title": "Semantic Text Summarization of Long Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926698/12OmNqNG3ga",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a756",
"title": "Audiotory Movie Summarization by Detecting Scene Changes and Sound Events",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a756/12OmNrGb2gO",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdpsw/2016/3682/0/3682a867",
"title": "The Right Way to Search Evolving Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/ipdpsw/2016/3682a867/12OmNvDZF2Q",
"parentPublication": {
"id": "proceedings/ipdpsw/2016/3682/0",
"title": "2016 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2014/5666/0/07004278",
"title": "MAGE: Matching approximate patterns in richly-attributed graphs",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2014/07004278/12OmNyeECxb",
"parentPublication": {
"id": "proceedings/big-data/2014/5666/0",
"title": "2014 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2016/03/mmu2016030023",
"title": "Fast Summarization of User-Generated Videos: Exploiting Semantic, Emotional, and Quality Clues",
"doi": null,
"abstractUrl": "/magazine/mu/2016/03/mmu2016030023/13rRUxBa534",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a154",
"title": "Visual Summarization of Lecture Video Segments for Enhanced Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a154/1qBbGn4I3Ru",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09599560",
"title": "Anomaly Detection in Dynamic Graphs via Transformer",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09599560/1yeC6nu6NsA",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy314br",
"title": "2016 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"acronym": "vlhcc",
"groupId": "1001007",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx7G5RX",
"doi": "10.1109/VLHCC.2016.7739664",
"title": "Visual analysis of compound graphs",
"normalizedTitle": "Visual analysis of compound graphs",
"abstract": "Compound graphs consist of two separate components. On the one hand a graph structure describes which elements are related to each other and to what extent, i.e., inherent edge weights and directions may exist, which we refer to as adjacency edges. On the other hand the graph elements are not only related by adjacencies, but they are also hierarchically organized which might be considered another kind of relationship among the graph vertices. Those relations are further referred to as inclusion edges. There are various application domains in which such a data structure occurs and with which a data analyst has to deal, either analytically on the basis of algorithms or visually, i.e., more on the basis of diagrams and visual languages. In this paper we introduce a visualization tool that is able to provide linked views on both aspects, i.e., the graph relations and the hierarchical organization. We illustrate the usefulness of our tool in a case study investigating soccer team results that build weighted directed adjacency relations in a hierarchically structured world.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Compound graphs consist of two separate components. On the one hand a graph structure describes which elements are related to each other and to what extent, i.e., inherent edge weights and directions may exist, which we refer to as adjacency edges. On the other hand the graph elements are not only related by adjacencies, but they are also hierarchically organized which might be considered another kind of relationship among the graph vertices. Those relations are further referred to as inclusion edges. There are various application domains in which such a data structure occurs and with which a data analyst has to deal, either analytically on the basis of algorithms or visually, i.e., more on the basis of diagrams and visual languages. In this paper we introduce a visualization tool that is able to provide linked views on both aspects, i.e., the graph relations and the hierarchical organization. We illustrate the usefulness of our tool in a case study investigating soccer team results that build weighted directed adjacency relations in a hierarchically structured world.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Compound graphs consist of two separate components. On the one hand a graph structure describes which elements are related to each other and to what extent, i.e., inherent edge weights and directions may exist, which we refer to as adjacency edges. On the other hand the graph elements are not only related by adjacencies, but they are also hierarchically organized which might be considered another kind of relationship among the graph vertices. Those relations are further referred to as inclusion edges. There are various application domains in which such a data structure occurs and with which a data analyst has to deal, either analytically on the basis of algorithms or visually, i.e., more on the basis of diagrams and visual languages. In this paper we introduce a visualization tool that is able to provide linked views on both aspects, i.e., the graph relations and the hierarchical organization. We illustrate the usefulness of our tool in a case study investigating soccer team results that build weighted directed adjacency relations in a hierarchically structured world.",
"fno": "07739664",
"keywords": [
"Visualization",
"Data Visualization",
"Compounds",
"Layout",
"Measurement",
"Organizations",
"Image Color Analysis"
],
"authors": [
{
"affiliation": "VISUS, University of Stuttgart, 70569 Stuttgart, Germany",
"fullName": "Michael Burch",
"givenName": "Michael",
"surname": "Burch",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vlhcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-09-01T00:00:00",
"pubType": "proceedings",
"pages": "54-58",
"year": "2016",
"issn": "1943-6106",
"isbn": "978-1-5090-0252-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07739663",
"articleId": "12OmNC2xhBY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07739665",
"articleId": "12OmNx4Q6Ah",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdmw/2015/8493/0/8493a675",
"title": "OLAP Visual Analytics on Large Software Call Graphs with Hierarchical ChordMap",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2015/8493a675/12OmNAWpyrN",
"parentPublication": {
"id": "proceedings/icdmw/2015/8493/0",
"title": "2015 IEEE International Conference on Data Mining Workshop (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2011/0868/0/06004017",
"title": "Layered TimeRadarTrees",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2011/06004017/12OmNArthca",
"parentPublication": {
"id": "proceedings/iv/2011/0868/0",
"title": "2011 15th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2013/4797/0/06596128",
"title": "Visual summaries for graph collections",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2013/06596128/12OmNs5rkPo",
"parentPublication": {
"id": "proceedings/pacificvis/2013/4797/0",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720b071",
"title": "Video Summarization via Segments Summary Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720b071/12OmNvxbhKS",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2013/4797/0/06596150",
"title": "FlowGraph: A compound hierarchical graph for flow field exploration",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2013/06596150/12OmNyNQSKG",
"parentPublication": {
"id": "proceedings/pacificvis/2013/4797/0",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2010/8485/0/05635217",
"title": "TimeSpiderTrees: A Novel Visual Metaphor for Dynamic Compound Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2010/05635217/12OmNypIYEz",
"parentPublication": {
"id": "proceedings/vlhcc/2010/8485/0",
"title": "2010 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2013/4797/0/06596146",
"title": "Visualizing edge-edge relations in graphs",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2013/06596146/12OmNzsJ7ya",
"parentPublication": {
"id": "proceedings/pacificvis/2013/4797/0",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/04/mcg2013040088",
"title": "Visual Matrix Clustering of Social Networks",
"doi": null,
"abstractUrl": "/magazine/cg/2013/04/mcg2013040088/13rRUx0xPCH",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v0805",
"title": "Visual Exploration of Complex Time-Varying Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v0805/13rRUxBa5x5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06812198",
"title": "Visual Adjacency Lists for Dynamic Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06812198/13rRUxcbnCs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNASraww",
"title": "2009 IEEE Pacific Visualization Symposium",
"acronym": "pacificvis",
"groupId": "1001657",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy87QAS",
"doi": "10.1109/PACIFICVIS.2009.4906842",
"title": "A visual canonical adjacency matrix for graphs",
"normalizedTitle": "A visual canonical adjacency matrix for graphs",
"abstract": "Graph data mining algorithms rely on graph canonical forms to compare different graph structures. These canonical form definitions depend on node and edge labels. In this paper, we introduce a unique canonical visual matrix representation that only depends on a graph's topological information, so that two structurally identical graphs will have exactly the same visual adjacency matrix representation. In this canonical matrix, nodes are ordered based on a Breadth-First Search spanning tree. Special rules and filters are designed to guarantee the uniqueness of an arrangement. Such a unique matrix representation provides persistence and a stability which can be used and harnessed in visualization, especially for data exploration and studies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Graph data mining algorithms rely on graph canonical forms to compare different graph structures. These canonical form definitions depend on node and edge labels. In this paper, we introduce a unique canonical visual matrix representation that only depends on a graph's topological information, so that two structurally identical graphs will have exactly the same visual adjacency matrix representation. In this canonical matrix, nodes are ordered based on a Breadth-First Search spanning tree. Special rules and filters are designed to guarantee the uniqueness of an arrangement. Such a unique matrix representation provides persistence and a stability which can be used and harnessed in visualization, especially for data exploration and studies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Graph data mining algorithms rely on graph canonical forms to compare different graph structures. These canonical form definitions depend on node and edge labels. In this paper, we introduce a unique canonical visual matrix representation that only depends on a graph's topological information, so that two structurally identical graphs will have exactly the same visual adjacency matrix representation. In this canonical matrix, nodes are ordered based on a Breadth-First Search spanning tree. Special rules and filters are designed to guarantee the uniqueness of an arrangement. Such a unique matrix representation provides persistence and a stability which can be used and harnessed in visualization, especially for data exploration and studies.",
"fno": "04906842",
"keywords": [],
"authors": [
{
"affiliation": "Pfizer Research Technology Center, USA",
"fullName": "Hongli Li",
"givenName": null,
"surname": "Hongli Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Massachusetts Lowell, USA",
"fullName": "Georges Grinstein",
"givenName": "Georges",
"surname": "Grinstein",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Massachusetts Lowell, USA",
"fullName": "Loura Costello",
"givenName": "Loura",
"surname": "Costello",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pacificvis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-04-01T00:00:00",
"pubType": "proceedings",
"pages": "89-96",
"year": "2009",
"issn": null,
"isbn": "978-1-4244-4404-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04906841",
"articleId": "12OmNy4r3Zf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04906843",
"articleId": "12OmNscOUcs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2005/2397/0/23970339",
"title": "Representation of Graphs on a Matrix Layout",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2005/23970339/12OmNAP1YYj",
"parentPublication": {
"id": "proceedings/iv/2005/2397/0",
"title": "Ninth International Conference on Information Visualisation (IV'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fpga/2000/2592/0/25920105",
"title": "A Representation for Dynamic Graphs in Reconfigurable Hardware and its Application to Fundamental Graph Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/fpga/2000/25920105/12OmNvRU0i3",
"parentPublication": {
"id": "proceedings/fpga/2000/2592/0",
"title": "Field-Programmable Gate Arrays, International ACM Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/focs/1961/5428/0/00169",
"title": "Canonical forms of functions in p-valued logics",
"doi": null,
"abstractUrl": "/proceedings-article/focs/1961/00169/12OmNxG1yDU",
"parentPublication": {
"id": "proceedings/focs/1961/5428/0",
"title": "1st and 2nd Annual Symposium on Switching Circuit Theory and Logical Design (SWCT 1960-1961)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pnpm/2001/1248/0/12480101",
"title": "Efficient Solution of GSPNs Using Canonical Matrix Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/pnpm/2001/12480101/12OmNyGbIkL",
"parentPublication": {
"id": "proceedings/pnpm/2001/1248/0",
"title": "Proceedings 9th International Workshop on Petri Nets and Performance Models",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2013/9999/0/06877483",
"title": "Scalable matrix computations on large scale-free graphs using 2D graph partitioning",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2013/06877483/12OmNyeWdPK",
"parentPublication": {
"id": "proceedings/sc/2013/9999/0",
"title": "2013 SC - International Conference for High Performance Computing, Networking, Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a616",
"title": "A Simplification Algorithm for Visualizing the Structure of Complex Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a616/12OmNzsJ7xO",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06812198",
"title": "Visual Adjacency Lists for Dynamic Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06812198/13rRUxcbnCs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08249874",
"title": "Graph Thumbnails: Identifying and Comparing Multiple Graphs at a Glance",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08249874/14H4WOr0FCU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020413",
"title": "Skew-Symmetric Adjacency Matrices for Clustering Directed Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020413/1KfRNJ45ri0",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09226461",
"title": "Responsive Matrix Cells: A Focus+Context Approach for Exploring and Editing Multivariate Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09226461/1nYrgS8Y9Py",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC3Xhho",
"title": "Information Visualization, IEEE Symposium on",
"acronym": "ieee-infovis",
"groupId": "1000371",
"volume": "0",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyugyVo",
"doi": "10.1109/INFVIS.2004.18",
"title": "Dynamic Drawing of Clustered Graphs",
"normalizedTitle": "Dynamic Drawing of Clustered Graphs",
"abstract": "This paper presents an algorithm for drawing a sequence of graphs that contain an inherent grouping of their vertex set into clusters. It differs from previous work on dynamic graph drawing in the emphasis that is put on maintaining the clustered structure of the graph during incremental layout. The algorithm works online and allows arbitrary modifications to the graph. It is generic and can be implemented using a wide range of static force-directed graph layout tools. The paper introduces several metrics for measuring layout quality of dynamic clustered graphs. The performance of our algorithm is analyzed using these metrics. The algorithm has been successfully applied to visualizing mobile object software.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an algorithm for drawing a sequence of graphs that contain an inherent grouping of their vertex set into clusters. It differs from previous work on dynamic graph drawing in the emphasis that is put on maintaining the clustered structure of the graph during incremental layout. The algorithm works online and allows arbitrary modifications to the graph. It is generic and can be implemented using a wide range of static force-directed graph layout tools. The paper introduces several metrics for measuring layout quality of dynamic clustered graphs. The performance of our algorithm is analyzed using these metrics. The algorithm has been successfully applied to visualizing mobile object software.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an algorithm for drawing a sequence of graphs that contain an inherent grouping of their vertex set into clusters. It differs from previous work on dynamic graph drawing in the emphasis that is put on maintaining the clustered structure of the graph during incremental layout. The algorithm works online and allows arbitrary modifications to the graph. It is generic and can be implemented using a wide range of static force-directed graph layout tools. The paper introduces several metrics for measuring layout quality of dynamic clustered graphs. The performance of our algorithm is analyzed using these metrics. The algorithm has been successfully applied to visualizing mobile object software.",
"fno": "87790191",
"keywords": [
"Graph Drawing",
"Dynamic Layout",
"Mobile Objects",
"Software Visualization"
],
"authors": [
{
"affiliation": "Technion - Israel Institute of Technology",
"fullName": "Yaniv Frishman",
"givenName": "Yaniv",
"surname": "Frishman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technion - Israel Institute of Technology",
"fullName": "Ayellet Tal",
"givenName": "Ayellet",
"surname": "Tal",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-infovis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-10-01T00:00:00",
"pubType": "proceedings",
"pages": "191-198",
"year": "2004",
"issn": "1522-404X",
"isbn": "0-7803-8779-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "87790183",
"articleId": "12OmNy5zsy4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "87790199",
"articleId": "12OmNCgrDcT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/apvis/2007/0808/0/04126223",
"title": "Force-directed drawing method for intersecting clustered graphs",
"doi": null,
"abstractUrl": "/proceedings-article/apvis/2007/04126223/12OmNqH9hkO",
"parentPublication": {
"id": "proceedings/apvis/2007/0808/0",
"title": "Asia-Pacific Symposium on Visualisation 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2016/8942/0/8942a069",
"title": "Drawing Clustered Graphs Using Stress Majorization and Force-Directed Placements",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a069/12OmNrMHOnz",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2014/4103/0/4103a013",
"title": "Drawing Large Weighted Graphs Using Clustered Force-Directed Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2014/4103a013/12OmNro0I3Y",
"parentPublication": {
"id": "proceedings/iv/2014/4103/0",
"title": "2014 18th International Conference on Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wpdrts/1995/7099/0/70990113",
"title": "Drawing execution graphs by parsing",
"doi": null,
"abstractUrl": "/proceedings-article/wpdrts/1995/70990113/12OmNro0IcA",
"parentPublication": {
"id": "proceedings/wpdrts/1995/7099/0",
"title": "Parallel and Distributed Real-Time Systems, Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2013/5051/0/5051a088",
"title": "Improving the Quality of Clustered Graph Drawing through a Dummy Element Approach",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2013/5051a088/12OmNvxKtYl",
"parentPublication": {
"id": "proceedings/cgiv/2013/5051/0",
"title": "2013 10th International Conference Computer Graphics, Imaging and Visualization (CGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/1995/08/e0662",
"title": "Parametric Graph Drawing",
"doi": null,
"abstractUrl": "/journal/ts/1995/08/e0662/13rRUNvya2D",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1998/11/t1297",
"title": "Interactive Orthogonal Graph Drawing",
"doi": null,
"abstractUrl": "/journal/tc/1998/11/t1297/13rRUwfZBZo",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/01/v0046",
"title": "Combining Hierarchy and Energy Drawing Directed Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2004/01/v0046/13rRUwgQpDf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/04/ttg2008040727",
"title": "Online Dynamic Graph Drawing",
"doi": null,
"abstractUrl": "/journal/tg/2008/04/ttg2008040727/13rRUxBJhvo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09950620",
"title": "Toward Efficient Deep Learning for Graph Drawing (DL4GD)",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09950620/1Ik4IPEtvu8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1rk0ady4yQ0",
"title": "2020 IEEE Visualization in Data Science (VDS)",
"acronym": "vds",
"groupId": "1829064",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1rk0ciCrXQQ",
"doi": "10.1109/VDS51726.2020.00008",
"title": "dg2pix: Pixel-Based Visual Analysis of Dynamic Graphs",
"normalizedTitle": "dg2pix: Pixel-Based Visual Analysis of Dynamic Graphs",
"abstract": "Presenting long sequences of dynamic graphs remains challenging due to the underlying large-scale and high-dimensional data. We propose dg2pix, a novel pixel-based visualization technique, to visually explore temporal and structural properties in long sequences of large-scale graphs. The approach consists of three main steps: (1) the multiscale modeling of the temporal dimension; (2) unsupervised graph embeddings to learn low-dimensional representations of the dynamic graph data; and (3) an interactive pixel-based visualization to simultaneously explore the evolving data at different temporal aggregation scales. dg2pix provides a scalable overview of a dynamic graph, supports the exploration of long sequences of high-dimensional graph data, and enables the identification and comparison of similar temporal states. We show the applicability of the technique to synthetic and real-world datasets, demonstrating that temporal patterns in dynamic graphs can be identified and interpreted over time. dg2pix contributes a suitable intermediate representation between node-link diagrams at the high detail end and matrix representations on the low detail end.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presenting long sequences of dynamic graphs remains challenging due to the underlying large-scale and high-dimensional data. We propose dg2pix, a novel pixel-based visualization technique, to visually explore temporal and structural properties in long sequences of large-scale graphs. The approach consists of three main steps: (1) the multiscale modeling of the temporal dimension; (2) unsupervised graph embeddings to learn low-dimensional representations of the dynamic graph data; and (3) an interactive pixel-based visualization to simultaneously explore the evolving data at different temporal aggregation scales. dg2pix provides a scalable overview of a dynamic graph, supports the exploration of long sequences of high-dimensional graph data, and enables the identification and comparison of similar temporal states. We show the applicability of the technique to synthetic and real-world datasets, demonstrating that temporal patterns in dynamic graphs can be identified and interpreted over time. dg2pix contributes a suitable intermediate representation between node-link diagrams at the high detail end and matrix representations on the low detail end.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presenting long sequences of dynamic graphs remains challenging due to the underlying large-scale and high-dimensional data. We propose dg2pix, a novel pixel-based visualization technique, to visually explore temporal and structural properties in long sequences of large-scale graphs. The approach consists of three main steps: (1) the multiscale modeling of the temporal dimension; (2) unsupervised graph embeddings to learn low-dimensional representations of the dynamic graph data; and (3) an interactive pixel-based visualization to simultaneously explore the evolving data at different temporal aggregation scales. dg2pix provides a scalable overview of a dynamic graph, supports the exploration of long sequences of high-dimensional graph data, and enables the identification and comparison of similar temporal states. We show the applicability of the technique to synthetic and real-world datasets, demonstrating that temporal patterns in dynamic graphs can be identified and interpreted over time. dg2pix contributes a suitable intermediate representation between node-link diagrams at the high detail end and matrix representations on the low detail end.",
"fno": "928400a032",
"keywords": [
"Data Visualisation",
"Graph Theory",
"Unsupervised Learning",
"Interactive Pixel Based Visualization",
"High Dimensional Graph Data",
"Dg 2 Pix",
"Pixel Based Visual Analysis",
"Temporal Properties",
"Structural Properties",
"Large Scale Graphs",
"Low Dimensional Representations",
"Dynamic Graph Data",
"Pixel Based Visualization Technique",
"Temporal Aggregation Scales",
"Unsupervised Graph Embeddings",
"Node Link Diagrams",
"Visualization",
"Data Visualization",
"Prototypes",
"Data Science",
"Data Models",
"Human Centered Computing",
"Visualization",
"Visualization Techniques",
"Machine Learning",
"Learning Paradigms",
"Unsupervised Learning"
],
"authors": [
{
"affiliation": "University of Konstanz",
"fullName": "Eren Cakmak",
"givenName": "Eren",
"surname": "Cakmak",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Independent Researcher",
"fullName": "Dominik Jäckle",
"givenName": "Dominik",
"surname": "Jäckle",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Graz",
"fullName": "Tobias Schreck",
"givenName": "Tobias",
"surname": "Schreck",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Konstanz",
"fullName": "Daniel Keim",
"givenName": "Daniel",
"surname": "Keim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vds",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-10-01T00:00:00",
"pubType": "proceedings",
"pages": "32-41",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9284-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1rk0bv41LWw",
"name": "pvds202092840-09355219s1-mm_928400a032.zip",
"size": "29.1 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvds202092840-09355219s1-mm_928400a032.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "928400a022",
"articleId": "1rk0cA2NgUo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "928400a042",
"articleId": "1rk0cpGr1yo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2013/4797/0/06596126",
"title": "Smooth bundling of large streaming and sequence graphs",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2013/06596126/12OmNscfI0r",
"parentPublication": {
"id": "proceedings/pacificvis/2013/4797/0",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2016/0252/0/07739664",
"title": "Visual analysis of compound graphs",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2016/07739664/12OmNx7G5RX",
"parentPublication": {
"id": "proceedings/vlhcc/2016/0252/0",
"title": "2016 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/1993/3970/0/00269613",
"title": "Onion graphs: aesthetics and layout",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1993/00269613/12OmNxEBz7q",
"parentPublication": {
"id": "proceedings/vl/1993/3970/0",
"title": "Proceedings 1993 IEEE Symposium on Visual Languages",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2017/0831/0/0831a230",
"title": "Dynamic Graph Visualization on Different Temporal Granularities",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a230/12OmNzxPTGk",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06812198",
"title": "Visual Adjacency Lists for Dynamic Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06812198/13rRUxcbnCs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0/945700a283",
"title": "DyDom: Detecting Malicious Domains with Spatial-Temporal Analysis on Dynamic Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2021/945700a283/1DNDB7f1EeQ",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0",
"title": "2021 IEEE 23rd Int Conf on High Performance Computing & Communications; 7th Int Conf on Data Science & Systems; 19th Int Conf on Smart City; 7th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/10048576",
"title": "Time-Aware Dynamic Graph Embedding for Asynchronous Structural Evolution",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/10048576/1KQ5Ef9SXeM",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933748",
"title": "Nonuniform Timeslicing of Dynamic Graphs Based on Visual Complexity",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933748/1fTgIQAyj8k",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222072",
"title": "Multiscale Snapshots: Visual Analysis of Temporal Summaries in Dynamic Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222072/1nTqwNTE1AQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smds/2021/0058/0/005800a184",
"title": "DynGraphTrans: Dynamic Graph Embedding via Modified Universal Transformer Networks for Financial Transaction Data",
"doi": null,
"abstractUrl": "/proceedings-article/smds/2021/005800a184/1yeQwSQeMlq",
"parentPublication": {
"id": "proceedings/smds/2021/0058/0",
"title": "2021 IEEE International Conference on Smart Data Services (SMDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNylsZKi",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "1998",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCeK2eN",
"doi": "10.1109/VISUAL.1998.745315",
"title": "Simplification of Tetrahedral Meshes",
"normalizedTitle": "Simplification of Tetrahedral Meshes",
"abstract": "We present a method for the construction of multiple levels of tetrahedral meshes approximating a trivariate function at different levels of detail. Starting with an initial, high-resolution triangulation of a three-dimensional region, we construct coarser representation levels by collapsing tetrahedra. Each triangulation defines a linear spline function, where the function values associated with the vertices are the spline coefficients. Based on predicted errors, we collapse tetrahedron in the grid that do not cause the maximum error to exceed a use-specified threshold. Bounds are stored for individual tetrahedra and are updated as the mesh is simplified. We continue the simplification process until a certain error is reached. The result is a hierarchical data description suited for the efficient visualization of large data sets at varying levels of detail.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a method for the construction of multiple levels of tetrahedral meshes approximating a trivariate function at different levels of detail. Starting with an initial, high-resolution triangulation of a three-dimensional region, we construct coarser representation levels by collapsing tetrahedra. Each triangulation defines a linear spline function, where the function values associated with the vertices are the spline coefficients. Based on predicted errors, we collapse tetrahedron in the grid that do not cause the maximum error to exceed a use-specified threshold. Bounds are stored for individual tetrahedra and are updated as the mesh is simplified. We continue the simplification process until a certain error is reached. The result is a hierarchical data description suited for the efficient visualization of large data sets at varying levels of detail.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a method for the construction of multiple levels of tetrahedral meshes approximating a trivariate function at different levels of detail. Starting with an initial, high-resolution triangulation of a three-dimensional region, we construct coarser representation levels by collapsing tetrahedra. Each triangulation defines a linear spline function, where the function values associated with the vertices are the spline coefficients. Based on predicted errors, we collapse tetrahedron in the grid that do not cause the maximum error to exceed a use-specified threshold. Bounds are stored for individual tetrahedra and are updated as the mesh is simplified. We continue the simplification process until a certain error is reached. The result is a hierarchical data description suited for the efficient visualization of large data sets at varying levels of detail.",
"fno": "91760287",
"keywords": [
"Approximation Hierarchical Representation Mesh Generation Multiresolution Method Scattered Data Spline Triangulation Visualization"
],
"authors": [
{
"affiliation": "University of California at Davis",
"fullName": "Issac J. Trotts",
"givenName": "Issac J.",
"surname": "Trotts",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California at Davis",
"fullName": "Bernd Hamann",
"givenName": "Bernd",
"surname": "Hamann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California at Davis",
"fullName": "Kenneth I. Joy",
"givenName": "Kenneth I.",
"surname": "Joy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California at Davis",
"fullName": "David F. Wiley",
"givenName": "David F.",
"surname": "Wiley",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1998-10-01T00:00:00",
"pubType": "proceedings",
"pages": "287",
"year": "1998",
"issn": null,
"isbn": "0-8186-9176-x",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "91760279",
"articleId": "12OmNyUFg3k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "91760297",
"articleId": "12OmNyQGSpp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/1997/8262/0/82620135",
"title": "Multiresolution tetrahedral framework for visualizing regular volume data",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620135/12OmNBqdrfD",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gmp/2004/2078/0/20780115",
"title": "Bivariate Cubic Spline Space and Bivariate Cubic NURBS Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/gmp/2004/20780115/12OmNxUMHo1",
"parentPublication": {
"id": "proceedings/gmp/2004/2078/0",
"title": "Geometric Modeling and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1998/9176/0/91760119",
"title": "Efficient Co-Triangulation of Large Data Sets",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1998/91760119/12OmNyLiuqF",
"parentPublication": {
"id": "proceedings/ieee-vis/1998/9176/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1999/5897/0/58970053",
"title": "Implant Sprays: Compression of Progressive Tetrahedral Mesh Connectivity",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1999/58970053/12OmNyNQSCe",
"parentPublication": {
"id": "proceedings/ieee-vis/1999/5897/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/1997/8028/0/80280127",
"title": "Incremental view-dependent multiresolution triangulation of terrain",
"doi": null,
"abstractUrl": "/proceedings-article/pg/1997/80280127/12OmNzT7OyK",
"parentPublication": {
"id": "proceedings/pg/1997/8028/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dagstuhl/1997/0503/0/05030105",
"title": "Cluster-Based Generation of Hierarchical Surface Models",
"doi": null,
"abstractUrl": "/proceedings-article/dagstuhl/1997/05030105/12OmNzgeLHM",
"parentPublication": {
"id": "proceedings/dagstuhl/1997/0503/0",
"title": "Dagstuhl '97 - Scientific Visualization Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/01/v0017",
"title": "On Simulated Annealing and the Construction of Linear Spline Approximations for Scattered Data",
"doi": null,
"abstractUrl": "/journal/tg/2001/01/v0017/13rRUNvgz41",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/01/v0030",
"title": "On a Construction of a Hierarchy of Best Linear Spline Approximations Using Repeated Bisection",
"doi": null,
"abstractUrl": "/journal/tg/1999/01/v0030/13rRUwfI0PV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/03/v0224",
"title": "Simplification of Tetrahedral Meshes with Error Bounds",
"doi": null,
"abstractUrl": "/journal/tg/1999/03/v0224/13rRUxly95r",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/05/v0548",
"title": "On a Construction of a Hierarchy of Best Linear Spline Approximations Using a Finite Element Approach",
"doi": null,
"abstractUrl": "/journal/tg/2004/05/v0548/13rRUzp02ob",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCmpcNk",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxwENCm",
"doi": "10.1109/VIS.2005.94",
"title": "Streaming Meshes",
"normalizedTitle": "Streaming Meshes",
"abstract": "Recent years have seen an immense increase in the complexity of geometric data sets. Today's gigabyte-sized polygon models can no longer be completely loaded into the main memory of common desktop PCs. Unfortunately, current mesh formats, which were designed years ago when meshes were orders of magnitudes smaller, do not account for this. Using such formats to store large meshes is inefficient and complicates all subsequent processing. We describe a streaming format for polygon meshes that is simple enough to replace current offline mesh formats and is more suitable for representing large data sets. Furthermore, it is an ideal input and output format for I/O-efficient out-of-core algorithms that process meshes in a streaming, possibly pipelined, fashion. This paper chiefly concerns the underlying theory and the practical aspects of creating and working with this new representation. In particular, we describe desirable qualities for streaming meshes and methods for converting meshes from a traditional to a streaming format. A central theme of this paper is the issue of coherent and compatible layouts of the mesh vertices and polygons. We present metrics and diagrams that characterize the coherence of a mesh layout and suggest appropriate strategies for improving its \"streamability.\" To this end, we outline several out-of-core algorithms for reordering meshes with poor coherence, and present results for a menagerie of well known and generally incoherent surface meshes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent years have seen an immense increase in the complexity of geometric data sets. Today's gigabyte-sized polygon models can no longer be completely loaded into the main memory of common desktop PCs. Unfortunately, current mesh formats, which were designed years ago when meshes were orders of magnitudes smaller, do not account for this. Using such formats to store large meshes is inefficient and complicates all subsequent processing. We describe a streaming format for polygon meshes that is simple enough to replace current offline mesh formats and is more suitable for representing large data sets. Furthermore, it is an ideal input and output format for I/O-efficient out-of-core algorithms that process meshes in a streaming, possibly pipelined, fashion. This paper chiefly concerns the underlying theory and the practical aspects of creating and working with this new representation. In particular, we describe desirable qualities for streaming meshes and methods for converting meshes from a traditional to a streaming format. A central theme of this paper is the issue of coherent and compatible layouts of the mesh vertices and polygons. We present metrics and diagrams that characterize the coherence of a mesh layout and suggest appropriate strategies for improving its \"streamability.\" To this end, we outline several out-of-core algorithms for reordering meshes with poor coherence, and present results for a menagerie of well known and generally incoherent surface meshes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent years have seen an immense increase in the complexity of geometric data sets. Today's gigabyte-sized polygon models can no longer be completely loaded into the main memory of common desktop PCs. Unfortunately, current mesh formats, which were designed years ago when meshes were orders of magnitudes smaller, do not account for this. Using such formats to store large meshes is inefficient and complicates all subsequent processing. We describe a streaming format for polygon meshes that is simple enough to replace current offline mesh formats and is more suitable for representing large data sets. Furthermore, it is an ideal input and output format for I/O-efficient out-of-core algorithms that process meshes in a streaming, possibly pipelined, fashion. This paper chiefly concerns the underlying theory and the practical aspects of creating and working with this new representation. In particular, we describe desirable qualities for streaming meshes and methods for converting meshes from a traditional to a streaming format. A central theme of this paper is the issue of coherent and compatible layouts of the mesh vertices and polygons. We present metrics and diagrams that characterize the coherence of a mesh layout and suggest appropriate strategies for improving its \"streamability.\" To this end, we outline several out-of-core algorithms for reordering meshes with poor coherence, and present results for a menagerie of well known and generally incoherent surface meshes.",
"fno": "27660030",
"keywords": [],
"authors": [
{
"affiliation": "University of North Carolina at Chapel Hill",
"fullName": "Martin Isenburg",
"givenName": "Martin",
"surname": "Isenburg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lawrence Livermore National Laboratory",
"fullName": "Peter Lindstrom",
"givenName": "Peter",
"surname": "Lindstrom",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-10-01T00:00:00",
"pubType": "proceedings",
"pages": "30",
"year": "2005",
"issn": null,
"isbn": "0-7803-9462-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01532815",
"articleId": "12OmNzayNAg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01532816",
"articleId": "12OmNBNM8VS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/smi/2004/2075/0/20750209",
"title": "View-Dependent Streaming of Progressive Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2004/20750209/12OmNAlNiMU",
"parentPublication": {
"id": "proceedings/smi/2004/2075/0",
"title": "Proceedings. Shape Modeling International 2004",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177472",
"title": "Instance-aware simplification of 3D polygonal meshes",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177472/12OmNwDSdmg",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532800",
"title": "Streaming meshes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532800/12OmNxRWIap",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2012/1365/0/06274677",
"title": "Simplification Algorithm for General Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2012/06274677/12OmNxzMo0O",
"parentPublication": {
"id": "proceedings/cso/2012/1365/0",
"title": "2012 Fifth International Joint Conference on Computational Sciences and Optimization (CSO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2006/0366/0/04036898",
"title": "Format-Independent Multimedia Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2006/04036898/12OmNy7yEeu",
"parentPublication": {
"id": "proceedings/icme/2006/0366/0",
"title": "2006 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607481",
"title": "Loss tolerance scheme for 3D progressive meshes streaming over networks",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607481/12OmNywxlLO",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-css-icess/2014/6123/0/07056726",
"title": "A Technique for the Long Term Preservation of Finite Element Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-css-icess/2014/07056726/12OmNzmLxPB",
"parentPublication": {
"id": "proceedings/hpcc-css-icess/2014/6123/0",
"title": "2014 IEEE International Conference on High Performance Computing and Communications (HPCC), 2014 IEEE 6th International Symposium on Cyberspace Safety and Security (CSS) and 2014 IEEE 11th International Conference on Embedded Software and Systems (ICESS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2000/01/v0079",
"title": "Compressed Progressive Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2000/01/v0079/13rRUwhpBNZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v1221",
"title": "Out-of-Core Remeshing of Large Polygonal Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v1221/13rRUxcsYLE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/01/04015405",
"title": "Streaming Simplification of Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2007/01/04015405/13rRUyY28Yk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCf1Dp1",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyQphf1",
"doi": "10.1109/VISUAL.2002.1183767",
"title": "TetFusion: An Algorithm For Rapid Tetrahedral Mesh Simplification",
"normalizedTitle": "TetFusion: An Algorithm For Rapid Tetrahedral Mesh Simplification",
"abstract": "This paper introduces an algorithm for rapid progressive simplification of tetrahedral meshes: TetFusion. We describe how a simple geometry decimation operation steers a rapid and controlled progressive simplification of tetrahedral meshes, while also taking care of complex mesh-inconsistency problems. The algorithm features a high decimation ratio per step, and inherently discourages any cases of self-intersection of boundary, element-boundary intersection at concave boundary-regions, and negative volume tetrahedra (flipping). We achieved rigorous reduction ratios of up to 98% for meshes consisting of 827,904 elements in less than 2 minutes, progressing through a series of level-of-details (LoDs) of the mesh in a controlled manner. We describe how the approach supports a balanced re-distribution of space between tetrahedral elements, and explain some useful control parameters that make it faster and more intuitive than edge collapse-based decimation methods for volumetric meshes [3, 19, 21, 22]. Finally, we discuss how this approach can be employed for rapid LoD prototyping of large time-varying datasets as an aid to interactive visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces an algorithm for rapid progressive simplification of tetrahedral meshes: TetFusion. We describe how a simple geometry decimation operation steers a rapid and controlled progressive simplification of tetrahedral meshes, while also taking care of complex mesh-inconsistency problems. The algorithm features a high decimation ratio per step, and inherently discourages any cases of self-intersection of boundary, element-boundary intersection at concave boundary-regions, and negative volume tetrahedra (flipping). We achieved rigorous reduction ratios of up to 98% for meshes consisting of 827,904 elements in less than 2 minutes, progressing through a series of level-of-details (LoDs) of the mesh in a controlled manner. We describe how the approach supports a balanced re-distribution of space between tetrahedral elements, and explain some useful control parameters that make it faster and more intuitive than edge collapse-based decimation methods for volumetric meshes [3, 19, 21, 22]. Finally, we discuss how this approach can be employed for rapid LoD prototyping of large time-varying datasets as an aid to interactive visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces an algorithm for rapid progressive simplification of tetrahedral meshes: TetFusion. We describe how a simple geometry decimation operation steers a rapid and controlled progressive simplification of tetrahedral meshes, while also taking care of complex mesh-inconsistency problems. The algorithm features a high decimation ratio per step, and inherently discourages any cases of self-intersection of boundary, element-boundary intersection at concave boundary-regions, and negative volume tetrahedra (flipping). We achieved rigorous reduction ratios of up to 98% for meshes consisting of 827,904 elements in less than 2 minutes, progressing through a series of level-of-details (LoDs) of the mesh in a controlled manner. We describe how the approach supports a balanced re-distribution of space between tetrahedral elements, and explain some useful control parameters that make it faster and more intuitive than edge collapse-based decimation methods for volumetric meshes [3, 19, 21, 22]. Finally, we discuss how this approach can be employed for rapid LoD prototyping of large time-varying datasets as an aid to interactive visualization.",
"fno": "7498chopra",
"keywords": [
"Mesh Simplification",
"Multi Resolution",
"Level Of Detail",
"Unstructured Meshes"
],
"authors": [
{
"affiliation": "Mississippi State University",
"fullName": "Prashant Chopra",
"givenName": "Prashant",
"surname": "Chopra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mississippi State University",
"fullName": "Joerg Meyer",
"givenName": "Joerg",
"surname": "Meyer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-10-01T00:00:00",
"pubType": "proceedings",
"pages": "null",
"year": "2002",
"issn": "1070-2385",
"isbn": "0-7803-7498-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7498tasdizen",
"articleId": "12OmNyRg4pW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7498isenburg",
"articleId": "12OmNroijmR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isise/2008/3494/2/3494b414",
"title": "A Tetrahedral Mesh Generation Algorithm from Medical Images",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2008/3494b414/12OmNBUS7cC",
"parentPublication": {
"id": "proceedings/isise/2008/3494/2",
"title": "2008 International Symposium on Information Science and Engineering (ISISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1998/9176/0/91760287",
"title": "Simplification of Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1998/91760287/12OmNCeK2eN",
"parentPublication": {
"id": "proceedings/ieee-vis/1998/9176/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2002/7498/0/7498chopra2",
"title": "Immersive Volume Visualization of Seismic Simulations: A Case Study of Techniques Invented and Lessons Learned",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2002/7498chopra2/12OmNrkBwv3",
"parentPublication": {
"id": "proceedings/ieee-vis/2002/7498/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2002/1784/0/17840477",
"title": "Subdivision Surface Simplification",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840477/12OmNviZldv",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2003/2030/0/20300061",
"title": "Large Mesh Simplification using Processing Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300061/12OmNweBUR1",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/2004/8781/0/87810071",
"title": "Texture-Encoded Tetrahedral Strips",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2004/87810071/12OmNxwnctV",
"parentPublication": {
"id": "proceedings/vv/2004/8781/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vg/2005/26/0/01500537",
"title": "Simplification of unstructured tetrahedral meshes by point sampling",
"doi": null,
"abstractUrl": "/proceedings-article/vg/2005/01500537/12OmNyywxC8",
"parentPublication": {
"id": "proceedings/vg/2005/26/0",
"title": "Volume Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicse/2009/4027/0/4027a060",
"title": "An Efficient Mesh Simplification Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icicse/2009/4027a060/12OmNzxyiCQ",
"parentPublication": {
"id": "proceedings/icicse/2009/4027/0",
"title": "2009 Fourth International Conference on Internet Computing for Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/03/v0224",
"title": "Simplification of Tetrahedral Meshes with Error Bounds",
"doi": null,
"abstractUrl": "/journal/tg/1999/03/v0224/13rRUxly95r",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/01/04015405",
"title": "Streaming Simplification of Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2007/01/04015405/13rRUyY28Yk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNro0Ib9",
"title": "Volume Graphics 2005",
"acronym": "vg",
"groupId": "1002149",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyywxC8",
"doi": "10.1109/VG.2005.194110",
"title": "Simplification of unstructured tetrahedral meshes by point sampling",
"normalizedTitle": "Simplification of unstructured tetrahedral meshes by point sampling",
"abstract": "Tetrahedral meshes are widely used in scientific computing for representing 3D scalar, vector, and tensor fields. The size and complexity of some of these meshes can limit the performance of many visualization algorithms, making it hard to achieve interactive visualization. The use of simplified models is one way to enable the real-time exploration of these datasets. In this paper, we propose a novel technique for simplifying large unstructured meshes. Most current techniques simplify the geometry of the mesh using edge collapses. Our technique simplifies an underlying scalar field directly by segmenting the original scalar field into two pieces: the boundary of the original domain and the interior samples of the scalar field. We then simplify each piece separately, taking into account proper error bounds. Finally, we combine the simplified domain boundary and scalar field into a complete, simplified mesh that can be visualized with standard unstructured-data visualization tools. Our technique is much faster than edge-collapse-based simplification approaches. Furthermore, it is particularly suitable for aggressive simplification. Experiments show that isosurfaces and volume renderings of meshes produced by our technique have few noticeable visual artifacts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Tetrahedral meshes are widely used in scientific computing for representing 3D scalar, vector, and tensor fields. The size and complexity of some of these meshes can limit the performance of many visualization algorithms, making it hard to achieve interactive visualization. The use of simplified models is one way to enable the real-time exploration of these datasets. In this paper, we propose a novel technique for simplifying large unstructured meshes. Most current techniques simplify the geometry of the mesh using edge collapses. Our technique simplifies an underlying scalar field directly by segmenting the original scalar field into two pieces: the boundary of the original domain and the interior samples of the scalar field. We then simplify each piece separately, taking into account proper error bounds. Finally, we combine the simplified domain boundary and scalar field into a complete, simplified mesh that can be visualized with standard unstructured-data visualization tools. Our technique is much faster than edge-collapse-based simplification approaches. Furthermore, it is particularly suitable for aggressive simplification. Experiments show that isosurfaces and volume renderings of meshes produced by our technique have few noticeable visual artifacts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Tetrahedral meshes are widely used in scientific computing for representing 3D scalar, vector, and tensor fields. The size and complexity of some of these meshes can limit the performance of many visualization algorithms, making it hard to achieve interactive visualization. The use of simplified models is one way to enable the real-time exploration of these datasets. In this paper, we propose a novel technique for simplifying large unstructured meshes. Most current techniques simplify the geometry of the mesh using edge collapses. Our technique simplifies an underlying scalar field directly by segmenting the original scalar field into two pieces: the boundary of the original domain and the interior samples of the scalar field. We then simplify each piece separately, taking into account proper error bounds. Finally, we combine the simplified domain boundary and scalar field into a complete, simplified mesh that can be visualized with standard unstructured-data visualization tools. Our technique is much faster than edge-collapse-based simplification approaches. Furthermore, it is particularly suitable for aggressive simplification. Experiments show that isosurfaces and volume renderings of meshes produced by our technique have few noticeable visual artifacts.",
"fno": "01500537",
"keywords": [
"Data Visualisation",
"Mesh Generation",
"Computational Geometry",
"Computational Complexity",
"Rendering Computer Graphics",
"Mesh Simplification",
"Unstructured Tetrahedral Meshes",
"Point Sampling",
"Scalar Field Segmentation",
"Error Bounds",
"Domain Boundary",
"Unstructured Data Visualization Tools",
"Volume Renderings",
"Mesh Generation",
"Computational Geometry",
"Computational Complexity",
"Sampling Methods",
"Scientific Computing",
"Shape",
"Rendering Computer Graphics",
"Carbon Capture And Storage",
"Computer Graphics",
"Computational Geometry",
"Solid Modeling",
"Data Visualization",
"Piecewise Linear Techniques"
],
"authors": [
{
"affiliation": "Inst. of Sci. Comput. & Imaging, Utah Univ., Salt Lake City, UT, USA",
"fullName": "D. Uesu",
"givenName": "D.",
"surname": "Uesu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. of Sci. Comput. & Imaging, Utah Univ., Salt Lake City, UT, USA",
"fullName": "L. Bavoil",
"givenName": "L.",
"surname": "Bavoil",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. of Sci. Comput. & Imaging, Utah Univ., Salt Lake City, UT, USA",
"fullName": "S. Fleishman",
"givenName": "S.",
"surname": "Fleishman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. of Sci. Comput. & Imaging, Utah Univ., Salt Lake City, UT, USA",
"fullName": "J. Shepherd",
"givenName": "J.",
"surname": "Shepherd",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. of Sci. Comput. & Imaging, Utah Univ., Salt Lake City, UT, USA",
"fullName": "C.T. Silva",
"givenName": "C.T.",
"surname": "Silva",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-03-01T00:00:00",
"pubType": "proceedings",
"pages": "157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238",
"year": "2005",
"issn": "1727-8376",
"isbn": "3-905673-26-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01500536",
"articleId": "12OmNywOWNT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01500538",
"articleId": "12OmNzT7Otj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/tpcg/2004/2137/0/21370211",
"title": "Normal-Based Simplification Algorithm for Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/tpcg/2004/21370211/12OmNC8Mswc",
"parentPublication": {
"id": "proceedings/tpcg/2004/2137/0",
"title": "Theory and Practice of Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1998/9176/0/91760287",
"title": "Simplification of Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1998/91760287/12OmNCeK2eN",
"parentPublication": {
"id": "proceedings/ieee-vis/1998/9176/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1998/9176/0/00745285",
"title": "A general method for preserving attribute values on simplified meshes",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1998/00745285/12OmNvoFjVh",
"parentPublication": {
"id": "proceedings/visual/1998/9176/0",
"title": "Proceedings of Visualization '98",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2004/2171/0/21710174",
"title": "Simplification of Vector Fields over Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2004/21710174/12OmNwtWfFj",
"parentPublication": {
"id": "proceedings/cgi/2004/2171/0",
"title": "Proceedings. Computer Graphics International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2012/1365/0/06274677",
"title": "Simplification Algorithm for General Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2012/06274677/12OmNxzMo0O",
"parentPublication": {
"id": "proceedings/cso/2012/1365/0",
"title": "2012 Fifth International Joint Conference on Computational Sciences and Optimization (CSO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2001/7200/0/7200shaffer",
"title": "Efficient Adaptive Simplification of Massive Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2001/7200shaffer/12OmNzV70Bx",
"parentPublication": {
"id": "proceedings/ieee-vis/2001/7200/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2003/1946/0/19460034",
"title": "An Efficient Mesh Simplification Method with Feature Detection for Unstructured Meshes and Web Graphics",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2003/19460034/12OmNzXFozY",
"parentPublication": {
"id": "proceedings/cgi/2003/1946/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/03/v0224",
"title": "Simplification of Tetrahedral Meshes with Error Bounds",
"doi": null,
"abstractUrl": "/journal/tg/1999/03/v0224/13rRUxly95r",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/01/v0029",
"title": "Selective Refinement Queries for Volume Visualization of Unstructured Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2004/01/v0029/13rRUyY28Yi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/01/04015405",
"title": "Streaming Simplification of Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2007/01/04015405/13rRUyY28Yk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzuIjee",
"title": "Digital Media and Digital Content Management, Workshop on",
"acronym": "dmdcm",
"groupId": "1800440",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBUAvXW",
"doi": "10.1109/DMDCM.2011.53",
"title": "Optimized Motion Capture System for Full Body Human Motion Capturing Case Study of Educational Institution and Small Animation Production",
"normalizedTitle": "Optimized Motion Capture System for Full Body Human Motion Capturing Case Study of Educational Institution and Small Animation Production",
"abstract": "Motion capture system or MOCAP is a set of devices used for capturing moving objects. In addition to had used in the scientific community, Medical, Engineering, MOCAP is currently being used extensively in film and animation industry to create realistic movement of the characters and cartoons. A popular MOCAP system used to capture the movement is Optical Motion Capture, called Optical MOCAP that is able to apply a variety of object motions. Nowadays, the price of MOCAP system is high. And if the user uses full system of MOCAP to capture the movement of actor, some incorrect movement data occurred. This reason, the user needs to take a lot of effort to correct the movement data. With the high price and used effort, making institution or manufacturer difficult for decision-making provides the MOCAP to use. This research has the idea to study the adjustment device of MOCAP system that has minimal system and proper motions for basic of full body human movement, including of walking, running and jumping. By adjusting the number of cameras, the number of reflect markers, the placement of the camera. To capture the movement of the display area 10 square meters with review data movement and the movement of the cartoon show in real-time motions. The results showed that the number of cameras 4-6 to capture the movements of the actors in the area of 10 square meters and a height of 2.5 meters with digital camera, named Eagle Digital, placed at different. Use the least 29 points of reflection markers placed on actor, gait walking, running and jumping. There are both important moving markers and referencing markers. The importance points must placed the marker are actor head and a long of the spine. The results of this study will work with the amount of data movement is reduced. As well as to decide to use MOCAP equipment to suit the job they need.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Motion capture system or MOCAP is a set of devices used for capturing moving objects. In addition to had used in the scientific community, Medical, Engineering, MOCAP is currently being used extensively in film and animation industry to create realistic movement of the characters and cartoons. A popular MOCAP system used to capture the movement is Optical Motion Capture, called Optical MOCAP that is able to apply a variety of object motions. Nowadays, the price of MOCAP system is high. And if the user uses full system of MOCAP to capture the movement of actor, some incorrect movement data occurred. This reason, the user needs to take a lot of effort to correct the movement data. With the high price and used effort, making institution or manufacturer difficult for decision-making provides the MOCAP to use. This research has the idea to study the adjustment device of MOCAP system that has minimal system and proper motions for basic of full body human movement, including of walking, running and jumping. By adjusting the number of cameras, the number of reflect markers, the placement of the camera. To capture the movement of the display area 10 square meters with review data movement and the movement of the cartoon show in real-time motions. The results showed that the number of cameras 4-6 to capture the movements of the actors in the area of 10 square meters and a height of 2.5 meters with digital camera, named Eagle Digital, placed at different. Use the least 29 points of reflection markers placed on actor, gait walking, running and jumping. There are both important moving markers and referencing markers. The importance points must placed the marker are actor head and a long of the spine. The results of this study will work with the amount of data movement is reduced. As well as to decide to use MOCAP equipment to suit the job they need.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Motion capture system or MOCAP is a set of devices used for capturing moving objects. In addition to had used in the scientific community, Medical, Engineering, MOCAP is currently being used extensively in film and animation industry to create realistic movement of the characters and cartoons. A popular MOCAP system used to capture the movement is Optical Motion Capture, called Optical MOCAP that is able to apply a variety of object motions. Nowadays, the price of MOCAP system is high. And if the user uses full system of MOCAP to capture the movement of actor, some incorrect movement data occurred. This reason, the user needs to take a lot of effort to correct the movement data. With the high price and used effort, making institution or manufacturer difficult for decision-making provides the MOCAP to use. This research has the idea to study the adjustment device of MOCAP system that has minimal system and proper motions for basic of full body human movement, including of walking, running and jumping. By adjusting the number of cameras, the number of reflect markers, the placement of the camera. To capture the movement of the display area 10 square meters with review data movement and the movement of the cartoon show in real-time motions. The results showed that the number of cameras 4-6 to capture the movements of the actors in the area of 10 square meters and a height of 2.5 meters with digital camera, named Eagle Digital, placed at different. Use the least 29 points of reflection markers placed on actor, gait walking, running and jumping. There are both important moving markers and referencing markers. The importance points must placed the marker are actor head and a long of the spine. The results of this study will work with the amount of data movement is reduced. As well as to decide to use MOCAP equipment to suit the job they need.",
"fno": "4413a117",
"keywords": [
"Animation",
"Motion Capture",
"MOCAP",
"Human Movement Capturing",
"Optimization"
],
"authors": [
{
"affiliation": null,
"fullName": "Suwich Tirakoat",
"givenName": "Suwich",
"surname": "Tirakoat",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dmdcm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-05-01T00:00:00",
"pubType": "proceedings",
"pages": "117-120",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4413-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4413a111",
"articleId": "12OmNrIrPvj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4413a121",
"articleId": "12OmNCbCs1C",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2008/1971/0/04480809",
"title": "Identifying Motion Capture Tracking Markers with Self-Organizing Maps",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480809/12OmNAObbEt",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gmai/2007/2901/0/29010091",
"title": "Exploring Motion Sequence of Virtual Characters: Experimenting Motion Capture Variables",
"doi": null,
"abstractUrl": "/proceedings-article/gmai/2007/29010091/12OmNButq0m",
"parentPublication": {
"id": "proceedings/gmai/2007/2901/0",
"title": "2007 Geometric Modeling and Imaging: New Advances",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcse/2009/3570/1/3570a234",
"title": "Recognition of Human Actions Using Motion Capture Data and Support Vector Machine",
"doi": null,
"abstractUrl": "/proceedings-article/wcse/2009/3570a234/12OmNqJHFpJ",
"parentPublication": {
"id": "proceedings/wcse/2009/3570/1",
"title": "2009 WRI World Congress on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890335",
"title": "Hierarchical facial expression animation by motion capture data",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890335/12OmNx76TWi",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2004/2178/0/21780052",
"title": "A Study of Practical Approach of Using Motion Capture and Keyframe Animation Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2004/21780052/12OmNy3AgDN",
"parentPublication": {
"id": "proceedings/cgiv/2004/2178/0",
"title": "Proceedings. International Conference on Computer Graphics, Imaging and Visualization, 2004. CGIV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1997/7984/0/79840077",
"title": "An Animation Interface Designed for Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1997/79840077/12OmNyRg4fA",
"parentPublication": {
"id": "proceedings/ca/1997/7984/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kam/2009/3888/3/3888c098",
"title": "Modeling of Human Body for Animation by Micro-sensor Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/kam/2009/3888c098/12OmNzRZq1u",
"parentPublication": {
"id": "proceedings/kam/2009/3888/1",
"title": "Knowledge Acquisition and Modeling, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770849",
"title": "A Study on Practical Approach of Using Motion Capture and Keyframe Animation Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770849/12OmNzdoMvf",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a426",
"title": "A Multiview Depth-based Motion Capture Benchmark Dataset for Human Motion Denoising and Enhancement Research",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a426/1G56Sk1m9ag",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a017",
"title": "Model Retargeting Motion Capture System Based on Kinect Gesture Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a017/1vg7ZoRlguk",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAkEU4f",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqFrGC9",
"doi": "10.1109/ICME.2011.6011934",
"title": "Hybrid low-delay compression of motion capture data",
"normalizedTitle": "Hybrid low-delay compression of motion capture data",
"abstract": "Motion Capture (MoCap) is becoming important in many areas of technology, science, and art, including graphics, visualization, gaming, and medical applications. In parallel with its increased use and abundance, compression of this kind of data is becoming more important. In this paper, we propose a hybrid low-delay compression scheme for MoCap data that is particularly suitable for interactive applications such as online gaming or telemedicine. Experimental results confirm the superiority of the proposed approach against state-of-the-art methods for MoCap compression in both compression efficiency and delay, making it suitable for interactive applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Motion Capture (MoCap) is becoming important in many areas of technology, science, and art, including graphics, visualization, gaming, and medical applications. In parallel with its increased use and abundance, compression of this kind of data is becoming more important. In this paper, we propose a hybrid low-delay compression scheme for MoCap data that is particularly suitable for interactive applications such as online gaming or telemedicine. Experimental results confirm the superiority of the proposed approach against state-of-the-art methods for MoCap compression in both compression efficiency and delay, making it suitable for interactive applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Motion Capture (MoCap) is becoming important in many areas of technology, science, and art, including graphics, visualization, gaming, and medical applications. In parallel with its increased use and abundance, compression of this kind of data is becoming more important. In this paper, we propose a hybrid low-delay compression scheme for MoCap data that is particularly suitable for interactive applications such as online gaming or telemedicine. Experimental results confirm the superiority of the proposed approach against state-of-the-art methods for MoCap compression in both compression efficiency and delay, making it suitable for interactive applications.",
"fno": "06011934",
"keywords": [
"Discrete Cosine Transforms",
"Encoding",
"Delay",
"Principal Component Analysis",
"Quantization",
"Humans",
"Redundancy",
"Motion Capture",
"Mo Cap Data Compression",
"Transform Coding",
"Hybrid Coding"
],
"authors": [
{
"affiliation": "School of Engineering Science, Simon Fraser University, Burnaby, BC, V5A 1S6, Canada",
"fullName": "Choong-Hoon Kwak",
"givenName": null,
"surname": "Choong-Hoon Kwak",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Engineering Science, Simon Fraser University, Burnaby, BC, V5A 1S6, Canada",
"fullName": "Ivan V. Bajić",
"givenName": "Ivan V.",
"surname": "Bajić",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2011",
"issn": "1945-7871",
"isbn": "978-1-61284-348-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06011933",
"articleId": "12OmNzXFoLd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06011935",
"articleId": "12OmNyv7mgK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2011/348/0/06012175",
"title": "Error concealment strategies for Motion Capture data streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012175/12OmNA0dMIa",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480809",
"title": "Identifying Motion Capture Tracking Markers with Self-Organizing Maps",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480809/12OmNAObbEt",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1993/4120/0/00342320",
"title": "Effects of multispectral compression on machine exploitation",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1993/00342320/12OmNB9t6yH",
"parentPublication": {
"id": "proceedings/acssc/1993/4120/0",
"title": "Proceedings of 27th Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a084",
"title": "Feature Vector Compression Based on Least Error Quantization",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a084/12OmNs4S8yi",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04285074",
"title": "Fast and Robust Motion Tracking for Time-Varying Mesh Featuring Reeb-Graph-Based Skeleton Fitting and its Application to Motion Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04285074/12OmNx19jWi",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2006/2825/0/04155734",
"title": "Compression of Human Motion Data Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2006/04155734/12OmNzwZ6gW",
"parentPublication": {
"id": "proceedings/3dpvt/2006/2825/0",
"title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/01/v0005",
"title": "Human Motion Capture Data Compression by Model-Based Indexing: A Power Aware Approach",
"doi": null,
"abstractUrl": "/journal/tg/2007/01/v0005/13rRUB7a1fI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07042272",
"title": "Human Motion Capture Data Tailored Transform Coding",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07042272/13rRUytWF9m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486541",
"title": "Feature Aware 3D Mesh Compression Using Robust Principal Component Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486541/14jQfOi8KF0",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a049",
"title": "A Novel Dynamic Mesh Sequence Compression Framework for Progressive Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a049/1ap5AeYxipO",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC1GueH",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqHqSlX",
"doi": "",
"title": "Human actions recognition from streamed Motion Capture",
"normalizedTitle": "Human actions recognition from streamed Motion Capture",
"abstract": "This paper introduces a new method for streamed action recognition using Motion Capture (MoCap) data. First, the histograms of action poses, extracted from MoCap data, are computed according to Hausdorf distance. Then, using a dynamic programming algorithm and an incremental histogram computation, our proposed solution recognizes actions in real time from streams of poses. The comparison of histograms for recognition was achieved using Bhattacharyya distance. Furthermore, the learning phase has remained very efficient with respect to both time and complexity. We have shown the effectiveness of our solution by testing it on large datasets, obtained from animation databases. In particular, we were able to achieve excellent recognition rates that have outperformed the existing methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a new method for streamed action recognition using Motion Capture (MoCap) data. First, the histograms of action poses, extracted from MoCap data, are computed according to Hausdorf distance. Then, using a dynamic programming algorithm and an incremental histogram computation, our proposed solution recognizes actions in real time from streams of poses. The comparison of histograms for recognition was achieved using Bhattacharyya distance. Furthermore, the learning phase has remained very efficient with respect to both time and complexity. We have shown the effectiveness of our solution by testing it on large datasets, obtained from animation databases. In particular, we were able to achieve excellent recognition rates that have outperformed the existing methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a new method for streamed action recognition using Motion Capture (MoCap) data. First, the histograms of action poses, extracted from MoCap data, are computed according to Hausdorf distance. Then, using a dynamic programming algorithm and an incremental histogram computation, our proposed solution recognizes actions in real time from streams of poses. The comparison of histograms for recognition was achieved using Bhattacharyya distance. Furthermore, the learning phase has remained very efficient with respect to both time and complexity. We have shown the effectiveness of our solution by testing it on large datasets, obtained from animation databases. In particular, we were able to achieve excellent recognition rates that have outperformed the existing methods.",
"fno": "06460994",
"keywords": [
"Dynamic Programming",
"Image Enhancement",
"Image Motion Analysis",
"Learning Artificial Intelligence",
"Pose Estimation",
"Human Actions Recognition",
"Streamed Motion Capture Data",
"Hausdorf Distance",
"Dynamic Programming Algorithm",
"Incremental Histogram Computation",
"Bhattacharyya Distance",
"Learning Phase",
"Animation Databases",
"Complexity",
"Streamed Action Recognition",
"Action Poses",
"Histograms",
"Humans",
"Real Time Systems",
"Databases",
"Hidden Markov Models",
"Training",
"Dynamic Programming"
],
"authors": [
{
"affiliation": "Université de Lyon, CNRS Université Lyon 1, LIRIS, UMR5205, F-69622 France",
"fullName": "Mathieu Barnachon",
"givenName": "Mathieu",
"surname": "Barnachon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Université de Lyon, CNRS Université Lyon 1, LIRIS, UMR5205, F-69622 France",
"fullName": "Saïda Bouakaz",
"givenName": "Saïda",
"surname": "Bouakaz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Science University of Windsor, Windsor, ON, Canada N9B 3P4",
"fullName": "Boubakeur Boufama",
"givenName": "Boubakeur",
"surname": "Boufama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Université de Lyon, CNRS Université Lyon 1, LIRIS, UMR5205, F-69622 France",
"fullName": "Erwan Guillou",
"givenName": "Erwan",
"surname": "Guillou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-11-01T00:00:00",
"pubType": "proceedings",
"pages": "3807-3810",
"year": "2012",
"issn": "1051-4651",
"isbn": "978-1-4673-2216-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06460993",
"articleId": "12OmNwFicYJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06460995",
"articleId": "12OmNzC5Tr5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2008/2174/0/04761434",
"title": "Human action recognition with line and flow histograms",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761434/12OmNARAnbp",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460167",
"title": "Pose based activity recognition using Multiple Kernel learning",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460167/12OmNC1Y5mU",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a421",
"title": "A Multimodal Approach for Recognizing Human Actions Using Depth Information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a421/12OmNqJZgEi",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457b591",
"title": "Deep Representation Learning for Human Motion Prediction and Classification",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b591/12OmNrHB1Xw",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130480",
"title": "An X-T slice based method for action recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130480/12OmNwdL7f1",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460909",
"title": "Combining gradient histograms using orientation tensors for human action recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460909/12OmNxdDFzO",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06239233",
"title": "View invariant human action recognition using histograms of 3D joints",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239233/12OmNzV70Px",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460996",
"title": "Correlations between 48 human actions improve their detection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460996/12OmNzsJ7J6",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/11/ttp2013112782",
"title": "Temporal Localization of Actions with Actoms",
"doi": null,
"abstractUrl": "/journal/tp/2013/11/ttp2013112782/13rRUNvyagi",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/12/06826537",
"title": "Learning Human Actions by Combining Global Dynamics and Local Appearance",
"doi": null,
"abstractUrl": "/journal/tp/2014/12/06826537/13rRUxD9gYM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNz2TCud",
"title": "Multimedia Computing and Systems, International Conference on",
"acronym": "icmcs",
"groupId": "1000479",
"volume": "2",
"displayVolume": "2",
"year": "1999",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvk7JKX",
"doi": "10.1109/MMCS.1999.778574",
"title": "Video Indexing Using MPEG Motion Compensation Vectors",
"normalizedTitle": "Video Indexing Using MPEG Motion Compensation Vectors",
"abstract": "In the last years a lot of work has been done on color, textural, structural and semantic indexing of \"content-based\" video databases. Motion-based video indexing has been less explored, with approaches generally based on the analysis of optical flows. Compressed videos require the decompression of the sequences and the computation of optical flows, two steps computationally heavy.In this paper we propose some methods to index videos by motion features (mainly related to camera motion) and by motion-based spatial segmentation of frames, in a fully automatic way. Our idea is to use MPEG motion vectors as an alternative to optical flows. Their extraction is very simple and fast; it doesn't require a full decompression of the stream and saves us from computing optical flows. Additional computational economy comes from having one motion vector each 16x16 sub-image; this makes the algorithms faster than working with dense optical flows. Experimental results reported at the end of this paper show that MPEG motion compensation vectors are suitable for this kind of applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the last years a lot of work has been done on color, textural, structural and semantic indexing of \"content-based\" video databases. Motion-based video indexing has been less explored, with approaches generally based on the analysis of optical flows. Compressed videos require the decompression of the sequences and the computation of optical flows, two steps computationally heavy.In this paper we propose some methods to index videos by motion features (mainly related to camera motion) and by motion-based spatial segmentation of frames, in a fully automatic way. Our idea is to use MPEG motion vectors as an alternative to optical flows. Their extraction is very simple and fast; it doesn't require a full decompression of the stream and saves us from computing optical flows. Additional computational economy comes from having one motion vector each 16x16 sub-image; this makes the algorithms faster than working with dense optical flows. Experimental results reported at the end of this paper show that MPEG motion compensation vectors are suitable for this kind of applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the last years a lot of work has been done on color, textural, structural and semantic indexing of \"content-based\" video databases. Motion-based video indexing has been less explored, with approaches generally based on the analysis of optical flows. Compressed videos require the decompression of the sequences and the computation of optical flows, two steps computationally heavy.In this paper we propose some methods to index videos by motion features (mainly related to camera motion) and by motion-based spatial segmentation of frames, in a fully automatic way. Our idea is to use MPEG motion vectors as an alternative to optical flows. Their extraction is very simple and fast; it doesn't require a full decompression of the stream and saves us from computing optical flows. Additional computational economy comes from having one motion vector each 16x16 sub-image; this makes the algorithms faster than working with dense optical flows. Experimental results reported at the end of this paper show that MPEG motion compensation vectors are suitable for this kind of applications.",
"fno": "02530725",
"keywords": [
"Image And Video Databases",
"Content Based Indexing And Retrieval",
"MPEG Motion Vectors"
],
"authors": [
{
"affiliation": "University of Palermo",
"fullName": "E. Ardizzone",
"givenName": "E.",
"surname": "Ardizzone",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Palermo",
"fullName": "M. La Cascia",
"givenName": "M. La",
"surname": "Cascia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Palermo",
"fullName": "A. Avanzato",
"givenName": "A.",
"surname": "Avanzato",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Palermo",
"fullName": "A. Bruna",
"givenName": "A.",
"surname": "Bruna",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmcs",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1999-06-01T00:00:00",
"pubType": "proceedings",
"pages": "725",
"year": "1999",
"issn": "1530-2032",
"isbn": "0-7695-0253-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "02530720",
"articleId": "12OmNx6g6kg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "02530730",
"articleId": "12OmNx4Q6F4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyqRndY",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "1",
"displayVolume": "1",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNywxlMM",
"doi": "10.1109/ICPR.2000.905529",
"title": "Memory-based moving object extraction for video indexing",
"normalizedTitle": "Memory-based moving object extraction for video indexing",
"abstract": "Extracting moving objects from a video shot provides a good low-level representation of videos. It provides object trajectory, color, shape characteristics. Combined with specific domain knowledge, it can be a powerful cue as what is going in a video shot. The paper proposes an unsupervised moving object extraction/tracking system that attempts to capture salient moving objects from an image sequence. The novelty of the proposed system lies in that it requires no object initialization and it is aimed to tolerate noisy segmentations at individual frame level. A temporal stack structure is used as a memory device to filter and learn salient objects. The learning of moving objects takes a bottom-up approach, moving from independent motion segmentation results at each frame level to a learned whole object characteristics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Extracting moving objects from a video shot provides a good low-level representation of videos. It provides object trajectory, color, shape characteristics. Combined with specific domain knowledge, it can be a powerful cue as what is going in a video shot. The paper proposes an unsupervised moving object extraction/tracking system that attempts to capture salient moving objects from an image sequence. The novelty of the proposed system lies in that it requires no object initialization and it is aimed to tolerate noisy segmentations at individual frame level. A temporal stack structure is used as a memory device to filter and learn salient objects. The learning of moving objects takes a bottom-up approach, moving from independent motion segmentation results at each frame level to a learned whole object characteristics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Extracting moving objects from a video shot provides a good low-level representation of videos. It provides object trajectory, color, shape characteristics. Combined with specific domain knowledge, it can be a powerful cue as what is going in a video shot. The paper proposes an unsupervised moving object extraction/tracking system that attempts to capture salient moving objects from an image sequence. The novelty of the proposed system lies in that it requires no object initialization and it is aimed to tolerate noisy segmentations at individual frame level. A temporal stack structure is used as a memory device to filter and learn salient objects. The learning of moving objects takes a bottom-up approach, moving from independent motion segmentation results at each frame level to a learned whole object characteristics.",
"fno": "00905529",
"keywords": [
"Motion Estimation",
"Image Sequences",
"Database Indexing",
"Video Databases",
"Image Segmentation",
"Unsupervised Learning",
"Memory Based Moving Object Extraction",
"Video Indexing",
"Video Shot",
"Low Level Representation",
"Object Trajectory",
"Object Color",
"Object Shape",
"Domain Knowledge",
"Unsupervised Moving Object Extraction Tracking System",
"Noisy Segmentations",
"Temporal Stack Structure",
"Bottom Up Approach",
"Independent Motion Segmentation",
"Whole Object Characteristics",
"Indexing",
"Motion Estimation",
"Filters",
"Motion Segmentation",
"Computer Vision",
"Shape",
"Humans",
"MPEG 4 Standard",
"Robustness",
"Filtering"
],
"authors": [
{
"affiliation": "Image Formation & Process. Group, Illinois Univ., Urbana, IL, USA",
"fullName": "R. Ruoyu Wang",
"givenName": "R.",
"surname": "Ruoyu Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "P. Hong",
"givenName": "P.",
"surname": "Hong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "T. Huang",
"givenName": "T.",
"surname": "Huang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-01-01T00:00:00",
"pubType": "proceedings",
"pages": "811,812,813,814",
"year": "2000",
"issn": "1051-4651",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07501742",
"articleId": "12OmNBqdr4R",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07501746",
"articleId": "12OmNx7ouPJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2000/0750/1/07501811",
"title": "Memory-Based Moving Object Extraction for Video Indexing",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2000/07501811/12OmNrkBwuk",
"parentPublication": {
"id": "proceedings/icpr/2000/0750/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdew/2013/5303/0/06547473",
"title": "Indexing and querying moving objects in indoor spaces",
"doi": null,
"abstractUrl": "/proceedings-article/icdew/2013/06547473/12OmNs0C9Wy",
"parentPublication": {
"id": "proceedings/icdew/2013/5303/0",
"title": "2013 IEEE 29th International Conference on Data Engineering Workshops (ICDEW 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2012/4779/0/4779a038",
"title": "Indexing Moving Objects in Indoor Cellular Space",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2012/4779a038/12OmNvSbBz0",
"parentPublication": {
"id": "proceedings/nbis/2012/4779/0",
"title": "2012 15th International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichit/2006/2674/1/04021100",
"title": "Salient Motion Information Detection Technique Using Weighted Subtraction Image and Motion Vector",
"doi": null,
"abstractUrl": "/proceedings-article/ichit/2006/04021100/12OmNwIpNpe",
"parentPublication": {
"id": "proceedings/ichit/2006/2674/1",
"title": "2006 International Conference on Hybrid Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2000/0750/3/00903738",
"title": "Detection and tracking of moving objects using a new level set based method",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2000/00903738/12OmNwkhTfl",
"parentPublication": {
"id": "proceedings/icpr/2000/0750/3",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607695",
"title": "Segmentation-based extraction of important objects from video for object-based indexing",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607695/12OmNxwENQl",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/3/81833276",
"title": "Robust object extraction method using three cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81833276/12OmNyuy9VF",
"parentPublication": {
"id": "proceedings/icip/1997/8183/3",
"title": "Proceedings of International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbmi/2009/3662/0/3662a201",
"title": "Motion Vector Based Moving Object Detection and Tracking in the MPEG Compressed Domain",
"doi": null,
"abstractUrl": "/proceedings-article/cbmi/2009/3662a201/12OmNzBOi5Y",
"parentPublication": {
"id": "proceedings/cbmi/2009/3662/0",
"title": "Content-Based Multimedia Indexing, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasfaa/2003/1895/0/18950175",
"title": "Q+Rtree: Efficient Indexing for Moving Object Databases",
"doi": null,
"abstractUrl": "/proceedings-article/dasfaa/2003/18950175/12OmNzlUKEq",
"parentPublication": {
"id": "proceedings/dasfaa/2003/1895/0",
"title": "Proceedings Eighth International Conference on Database Systems for Advanced Applications (DASFAA 2003)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2002/10/t1124",
"title": "Query Indexing and Velocity Constrained Indexing: Scalable Techniques for Continuous Queries on Moving Objects",
"doi": null,
"abstractUrl": "/journal/tc/2002/10/t1124/13rRUIJuxoF",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1G55WEFExd6",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1G56Sk1m9ag",
"doi": "10.1109/CVPRW56347.2022.00058",
"title": "A Multiview Depth-based Motion Capture Benchmark Dataset for Human Motion Denoising and Enhancement Research",
"normalizedTitle": "A Multiview Depth-based Motion Capture Benchmark Dataset for Human Motion Denoising and Enhancement Research",
"abstract": "The field of human motion enhancement is a rapidly expanding field of study in which depth-based motion capture (D-Mocap) is improved to generate a more accurate counterpart for demanding high precision real-world applications. The D-Mocap that is initially generated relies on commercially available SDKs or open source tools to produce the initial skeletal sequence which works best in an ideal front-facing camera setup. This in turn creates a challenging initialization for human motion enhancement when the camera is not positioned in the ideal forward facing position. Currently there are no multiview D-Mocap datasets which have corresponding time-synced and skeleton-matched optical motion capture (Mocap) reference data for view-invariant motion enhancement. We develop a multiview D-Mocap dataset extended from the popular and comprehensive Berkeley MHAD dataset [29]. In addition, we analyze the performance of the D-Mocap data generated through a series of open source tools, highlighting the difficulty and the need to produce robust results in a rear-facing camera setup due to a 21.4% increase in average joint position error over front-facing data. Finally, we analyze the results of some recent human motion enhancement algorithms with regard to a front-facing camera setup versus a rear-facing one.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The field of human motion enhancement is a rapidly expanding field of study in which depth-based motion capture (D-Mocap) is improved to generate a more accurate counterpart for demanding high precision real-world applications. The D-Mocap that is initially generated relies on commercially available SDKs or open source tools to produce the initial skeletal sequence which works best in an ideal front-facing camera setup. This in turn creates a challenging initialization for human motion enhancement when the camera is not positioned in the ideal forward facing position. Currently there are no multiview D-Mocap datasets which have corresponding time-synced and skeleton-matched optical motion capture (Mocap) reference data for view-invariant motion enhancement. We develop a multiview D-Mocap dataset extended from the popular and comprehensive Berkeley MHAD dataset [29]. In addition, we analyze the performance of the D-Mocap data generated through a series of open source tools, highlighting the difficulty and the need to produce robust results in a rear-facing camera setup due to a 21.4% increase in average joint position error over front-facing data. Finally, we analyze the results of some recent human motion enhancement algorithms with regard to a front-facing camera setup versus a rear-facing one.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The field of human motion enhancement is a rapidly expanding field of study in which depth-based motion capture (D-Mocap) is improved to generate a more accurate counterpart for demanding high precision real-world applications. The D-Mocap that is initially generated relies on commercially available SDKs or open source tools to produce the initial skeletal sequence which works best in an ideal front-facing camera setup. This in turn creates a challenging initialization for human motion enhancement when the camera is not positioned in the ideal forward facing position. Currently there are no multiview D-Mocap datasets which have corresponding time-synced and skeleton-matched optical motion capture (Mocap) reference data for view-invariant motion enhancement. We develop a multiview D-Mocap dataset extended from the popular and comprehensive Berkeley MHAD dataset [29]. In addition, we analyze the performance of the D-Mocap data generated through a series of open source tools, highlighting the difficulty and the need to produce robust results in a rear-facing camera setup due to a 21.4% increase in average joint position error over front-facing data. Finally, we analyze the results of some recent human motion enhancement algorithms with regard to a front-facing camera setup versus a rear-facing one.",
"fno": "873900a426",
"keywords": [
"Cameras",
"Image Capture",
"Image Denoising",
"Image Enhancement",
"Image Motion Analysis",
"Image Sensors",
"Image Sequences",
"High Precision Real World Applications",
"Open Source Tools",
"Initial Skeletal Sequence",
"Front Facing Camera Setup",
"Multiview D Mocap Dataset",
"Optical Motion Capture Reference Data",
"View Invariant Motion Enhancement",
"Comprehensive Berkeley MHAD Dataset",
"D Mocap Data",
"Multiview Depth Based Motion Capture Benchmark Dataset",
"Human Motion Denoising",
"Human Motion Enhancement Algorithms",
"Average Joint Position Error",
"Conferences",
"Software Algorithms",
"Noise Reduction",
"Benchmark Testing",
"Cameras",
"Motion Capture",
"Adaptive Optics"
],
"authors": [
{
"affiliation": "Oklahoma State University,School of Electrical and Computer Engineering,Stillwater,OK,74078",
"fullName": "Nate Lannan",
"givenName": "Nate",
"surname": "Lannan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Oklahoma State University,School of Electrical and Computer Engineering,Stillwater,OK,74078",
"fullName": "Le Zhou",
"givenName": "Le",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Oklahoma State University,School of Electrical and Computer Engineering,Stillwater,OK,74078",
"fullName": "Guoliang Fan",
"givenName": "Guoliang",
"surname": "Fan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "426-435",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8739-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "873900a417",
"articleId": "1G56kgubwwU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "873900a436",
"articleId": "1G56QToOMlq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icat/2007/3056/0/30560314",
"title": "A Study on Motion Visualization System Using Motion Capture Data",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2007/30560314/12OmNAFFdI5",
"parentPublication": {
"id": "proceedings/icat/2007/3056/0",
"title": "17th International Conference on Artificial Reality and Telexistence (ICAT 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a117",
"title": "Optimized Motion Capture System for Full Body Human Motion Capturing Case Study of Educational Institution and Small Animation Production",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a117/12OmNBUAvXW",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gmai/2007/2901/0/29010091",
"title": "Exploring Motion Sequence of Virtual Characters: Experimenting Motion Capture Variables",
"doi": null,
"abstractUrl": "/proceedings-article/gmai/2007/29010091/12OmNButq0m",
"parentPublication": {
"id": "proceedings/gmai/2007/2901/0",
"title": "2007 Geometric Modeling and Imaging: New Advances",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a091",
"title": "A Rigid Structure Matching-Based Noise Data Processing Approach for Human Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a091/12OmNCcKQz4",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/2000/0683/0/06830077",
"title": "Skeleton-Based Motion Capture for Robust Reconstruction of Human Motion",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2000/06830077/12OmNwt5sop",
"parentPublication": {
"id": "proceedings/ca/2000/0683/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815025",
"title": "Automatic Motion Capture Data Denoising via Filtered Local Subspace Affinity and Low Rank Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815025/12OmNxj23eV",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2004/2178/0/21780052",
"title": "A Study of Practical Approach of Using Motion Capture and Keyframe Animation Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2004/21780052/12OmNy3AgDN",
"parentPublication": {
"id": "proceedings/cgiv/2004/2178/0",
"title": "Proceedings. International Conference on Computer Graphics, Imaging and Visualization, 2004. CGIV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770849",
"title": "A Study on Practical Approach of Using Motion Capture and Keyframe Animation Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770849/12OmNzdoMvf",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/05/ttg2010050870",
"title": "Example-Based Human Motion Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2010/05/ttg2010050870/13rRUNvgyWj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a318",
"title": "AR-MoCap: Using Augmented Reality to Support Motion Capture Acting",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a318/1MNgvnk65gs",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1H2petWxAqI",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"acronym": "cost",
"groupId": "1847867",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H2pr5vTQys",
"doi": "10.1109/CoST57098.2022.00052",
"title": "Research on Virtual Human Development Based on Motion Capture",
"normalizedTitle": "Research on Virtual Human Development Based on Motion Capture",
"abstract": "In the field of film and television animation production, compared with the traditional animation frame point production, the use of motion capture technology can save the time and labor cost of 3D animation production, and effectively improve the smoothness and authenticity of the picture. The authors use inertial sensor-based attitude capture equipment and supporting software to capture people’s dance movements through sensors and controllers. By establishing the virtual character model in 3ds Max software and the virtual stage model in Cinema 4D software, the virtual human and character dance animation with high degree of restoration are made, and the optimized character model is combined with the scene model to produce the virtual character dance animation in the virtual scene. It provides a reference for the improvement of 3D animation technology involving complex character actions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the field of film and television animation production, compared with the traditional animation frame point production, the use of motion capture technology can save the time and labor cost of 3D animation production, and effectively improve the smoothness and authenticity of the picture. The authors use inertial sensor-based attitude capture equipment and supporting software to capture people’s dance movements through sensors and controllers. By establishing the virtual character model in 3ds Max software and the virtual stage model in Cinema 4D software, the virtual human and character dance animation with high degree of restoration are made, and the optimized character model is combined with the scene model to produce the virtual character dance animation in the virtual scene. It provides a reference for the improvement of 3D animation technology involving complex character actions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the field of film and television animation production, compared with the traditional animation frame point production, the use of motion capture technology can save the time and labor cost of 3D animation production, and effectively improve the smoothness and authenticity of the picture. The authors use inertial sensor-based attitude capture equipment and supporting software to capture people’s dance movements through sensors and controllers. By establishing the virtual character model in 3ds Max software and the virtual stage model in Cinema 4D software, the virtual human and character dance animation with high degree of restoration are made, and the optimized character model is combined with the scene model to produce the virtual character dance animation in the virtual scene. It provides a reference for the improvement of 3D animation technology involving complex character actions.",
"fno": "624800a216",
"keywords": [
"Computer Animation",
"Humanities",
"Solid Modelling",
"Virtual Reality",
"Cinema 4 D Software",
"Optimized Character Model",
"Scene Model",
"Virtual Character Dance Animation",
"Virtual Scene",
"Complex Character Actions",
"Virtual Human Development",
"Television Animation Production",
"Traditional Animation Frame Point Production",
"Motion Capture Technology",
"Labor Cost",
"Authenticity",
"Inertial Sensor Based Attitude Capture Equipment",
"Supporting Software",
"People",
"Sensors",
"Virtual Character Model",
"3 Ds Max Software",
"Virtual Stage Model",
"Humanities",
"Solid Modeling",
"Three Dimensional Displays",
"Costs",
"Production",
"Animation",
"Motion Pictures",
"Motion Capture",
"3 D Animation",
"Character Modeling",
"Scene Modeling"
],
"authors": [
{
"affiliation": "Communication University of China,Beijing,China",
"fullName": "Yuxin Yang",
"givenName": "Yuxin",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Communication University of China,Beijing,China",
"fullName": "Xinquan Luo",
"givenName": "Xinquan",
"surname": "Luo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Communication University of China,Beijing,China",
"fullName": "Xinpei Hu",
"givenName": "Xinpei",
"surname": "Hu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cost",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "216-220",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6248-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "624800a211",
"articleId": "1H2pk6Ia4fu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "624800a221",
"articleId": "1H2psp1TLva",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2010/4215/0/4215a046",
"title": "Building Hand Motion-Based Character Animation: The Case of Puppetry",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2010/4215a046/12OmNAle6A0",
"parentPublication": {
"id": "proceedings/cw/2010/4215/0",
"title": "2010 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2009/3963/0/05479097",
"title": "OpenMoCap: An Open Source Software for Optical Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2009/05479097/12OmNvTjZRj",
"parentPublication": {
"id": "proceedings/sbgames/2009/3963/0",
"title": "2009 VIII Brazilian Symposium on Games and Digital Entertainment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2004/2178/0/21780052",
"title": "A Study of Practical Approach of Using Motion Capture and Keyframe Animation Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2004/21780052/12OmNy3AgDN",
"parentPublication": {
"id": "proceedings/cgiv/2004/2178/0",
"title": "Proceedings. International Conference on Computer Graphics, Imaging and Visualization, 2004. CGIV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770849",
"title": "A Study on Practical Approach of Using Motion Capture and Keyframe Animation Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770849/12OmNzdoMvf",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/05/07891591",
"title": "Virtual Character Animation Based on Affordable Motion Capture and Reconfigurable Tangible Interfaces",
"doi": null,
"abstractUrl": "/journal/tg/2018/05/07891591/13rRUwjGoLN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08352750",
"title": "Surface Motion Capture Animation Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08352750/13rRUwjXZSl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2011/02/tlt2011020187",
"title": "A Virtual Reality Dance Training System Using Motion Capture Technology",
"doi": null,
"abstractUrl": "/journal/lt/2011/02/tlt2011020187/13rRUxC0SAH",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2020/8771/0/09122354",
"title": "Intellectual Property of Character Design based on Local Content of Ngada District, Indonesia",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2020/09122354/1kRSexOSgc8",
"parentPublication": {
"id": "proceedings/nicoint/2020/8771/0",
"title": "2020 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/04/09200517",
"title": "On the Plausibility of Virtual Body Animation Features in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/04/09200517/1ndVuuNfI64",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eimss/2021/2707/0/270700a022",
"title": "Research on the Application of Digital Media Technology in Sports Dance Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/eimss/2021/270700a022/1yEZQHwiT6w",
"parentPublication": {
"id": "proceedings/eimss/2021/2707/0",
"title": "2021 International Conference on Education, Information Management and Service Science (EIMSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1hQqfuoOyHu",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hVlFDUXIs0",
"doi": "10.1109/ICCV.2019.00554",
"title": "AMASS: Archive of Motion Capture As Surface Shapes",
"normalizedTitle": "AMASS: Archive of Motion Capture As Surface Shapes",
"abstract": "Large datasets are the cornerstone of recent advances in computer vision using deep learning. In contrast, existing human motion capture (mocap) datasets are small and the motions limited, hampering progress on learning models of human motion. While there are many different datasets available, they each use a different parameterization of the body, making it difficult to integrate them into a single meta dataset. To address this, we introduce AMASS, a large and varied database of human motion that unifies 15 different optical marker-based mocap datasets by representing them within a common framework and parameterization. We achieve this using a new method, MoSh++, that converts mocap data into realistic 3D human meshes represented by a rigged body model. Here we use SMPL [Loper et al., 2015], which is widely used and provides a standard skeletal representation as well as a fully rigged surface mesh. The method works for arbitrary marker sets, while recovering soft-tissue dynamics and realistic hand motion. We evaluate MoSh++ and tune its hyperparameters using a new dataset of 4D body scans that are jointly recorded with marker-based mocap. The consistent representation of AMASS makes it readily useful for animation, visualization, and generating training data for deep learning. Our dataset is significantly richer than previous human motion collections, having more than 40 hours of motion data, spanning over 300 subjects, more than 11000 motions, and will be publicly available to the research community.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Large datasets are the cornerstone of recent advances in computer vision using deep learning. In contrast, existing human motion capture (mocap) datasets are small and the motions limited, hampering progress on learning models of human motion. While there are many different datasets available, they each use a different parameterization of the body, making it difficult to integrate them into a single meta dataset. To address this, we introduce AMASS, a large and varied database of human motion that unifies 15 different optical marker-based mocap datasets by representing them within a common framework and parameterization. We achieve this using a new method, MoSh++, that converts mocap data into realistic 3D human meshes represented by a rigged body model. Here we use SMPL [Loper et al., 2015], which is widely used and provides a standard skeletal representation as well as a fully rigged surface mesh. The method works for arbitrary marker sets, while recovering soft-tissue dynamics and realistic hand motion. We evaluate MoSh++ and tune its hyperparameters using a new dataset of 4D body scans that are jointly recorded with marker-based mocap. The consistent representation of AMASS makes it readily useful for animation, visualization, and generating training data for deep learning. Our dataset is significantly richer than previous human motion collections, having more than 40 hours of motion data, spanning over 300 subjects, more than 11000 motions, and will be publicly available to the research community.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Large datasets are the cornerstone of recent advances in computer vision using deep learning. In contrast, existing human motion capture (mocap) datasets are small and the motions limited, hampering progress on learning models of human motion. While there are many different datasets available, they each use a different parameterization of the body, making it difficult to integrate them into a single meta dataset. To address this, we introduce AMASS, a large and varied database of human motion that unifies 15 different optical marker-based mocap datasets by representing them within a common framework and parameterization. We achieve this using a new method, MoSh++, that converts mocap data into realistic 3D human meshes represented by a rigged body model. Here we use SMPL [Loper et al., 2015], which is widely used and provides a standard skeletal representation as well as a fully rigged surface mesh. The method works for arbitrary marker sets, while recovering soft-tissue dynamics and realistic hand motion. We evaluate MoSh++ and tune its hyperparameters using a new dataset of 4D body scans that are jointly recorded with marker-based mocap. The consistent representation of AMASS makes it readily useful for animation, visualization, and generating training data for deep learning. Our dataset is significantly richer than previous human motion collections, having more than 40 hours of motion data, spanning over 300 subjects, more than 11000 motions, and will be publicly available to the research community.",
"fno": "480300f441",
"keywords": [
"Computer Animation",
"Computer Vision",
"Data Visualisation",
"Image Capture",
"Image Motion Analysis",
"Image Representation",
"Learning Artificial Intelligence",
"Mesh Generation",
"Pose Estimation",
"Shape Recognition",
"Solid Modelling",
"Stereo Image Processing",
"Deep Learning",
"AMASS",
"Computer Vision",
"Human Motion Capture Datasets",
"Mo Sh",
"Mocap Data",
"Realistic 3 D Human Meshes",
"Archive Of Motion Capture As Surface Shapes",
"SMPL",
"Skeletal Representation",
"Rigged Surface Mesh",
"Data Animation",
"Data Visualization",
"3 D Body Pose",
"Shape",
"Three Dimensional Displays",
"Joints",
"Machine Learning",
"Computational Modeling",
"Computer Vision"
],
"authors": [
{
"affiliation": "Meshcapade GmbH",
"fullName": "Naureen Mahmood",
"givenName": "Naureen",
"surname": "Mahmood",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Institute Tübingen",
"fullName": "Nima Ghorbani",
"givenName": "Nima",
"surname": "Ghorbani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "York University",
"fullName": "Nikolaus F. Troje",
"givenName": "Nikolaus F.",
"surname": "Troje",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MPII. Germany",
"fullName": "Gerard Pons-Moll",
"givenName": "Gerard",
"surname": "Pons-Moll",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Institute for Intelligent Systems",
"fullName": "Michael Black",
"givenName": "Michael",
"surname": "Black",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "5441-5450",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4803-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "480300f430",
"articleId": "1hVlIbqDjtC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "480300f451",
"articleId": "1hVlrum3U5y",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2016/8942/0/8942a216",
"title": "Visualising Human Motion: a First Principles Approach using Vicon data in Maya",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a216/12OmNAS9zBl",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206859",
"title": "Markerless Motion Capture with unsynchronized moving cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206859/12OmNqI04JK",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2013/0703/0/06636645",
"title": "Gait recognition based on marker-less 3D motion capture",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2013/06636645/12OmNwwd2Os",
"parentPublication": {
"id": "proceedings/avss/2013/0703/0",
"title": "2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890335",
"title": "Hierarchical facial expression animation by motion capture data",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890335/12OmNx76TWi",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cpsna/2014/5387/0/5387a043",
"title": "SmartPTA: A Smartphone-Based Human Motion Evaluation System",
"doi": null,
"abstractUrl": "/proceedings-article/cpsna/2014/5387a043/12OmNxR5UQl",
"parentPublication": {
"id": "proceedings/cpsna/2014/5387/0",
"title": "2014 IEEE International Conference on Cyber-Physical Systems, Networks, and Applications (CPSNA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08352750",
"title": "Surface Motion Capture Animation Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08352750/13rRUwjXZSl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1097",
"title": "SOMA: Solving Optical Marker-Based MoCap Automatically",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1097/1BmEfLpJrmU",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a061",
"title": "Kinematic Motion Analysis with Volumetric Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a061/1KaH0mB5jwc",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a017",
"title": "Model Retargeting Motion Capture System Based on Kinect Gesture Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a017/1vg7ZoRlguk",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a722",
"title": "BABEL: Bodies, Action and Behavior with English Labels",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a722/1yeL2baLoEE",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzSh1bp",
"title": "2015 2nd International Conference on Information Science and Control Engineering (ICISCE)",
"acronym": "icisce",
"groupId": "1807704",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyL0Tq5",
"doi": "10.1109/ICISCE.2015.164",
"title": "Discussion on Psychological Perception Representation and Knowledge Acquisition of Table Tennis Players",
"normalizedTitle": "Discussion on Psychological Perception Representation and Knowledge Acquisition of Table Tennis Players",
"abstract": "During the process of table tennis sports, the players' psychological situations are always in a rapidly changing. According to the characteristics of table tennis sports and based on computer technology application and artificial intelligence theory, this paper mainly studies psychological perception representation and knowledge acquisition of table tennis players. Then it develops the psychological multimedia information intelligence system to gather and analyses information of table tennis players. It also carries on the training, competition and case analysis to inspect and obtain a good response.",
"abstracts": [
{
"abstractType": "Regular",
"content": "During the process of table tennis sports, the players' psychological situations are always in a rapidly changing. According to the characteristics of table tennis sports and based on computer technology application and artificial intelligence theory, this paper mainly studies psychological perception representation and knowledge acquisition of table tennis players. Then it develops the psychological multimedia information intelligence system to gather and analyses information of table tennis players. It also carries on the training, competition and case analysis to inspect and obtain a good response.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "During the process of table tennis sports, the players' psychological situations are always in a rapidly changing. According to the characteristics of table tennis sports and based on computer technology application and artificial intelligence theory, this paper mainly studies psychological perception representation and knowledge acquisition of table tennis players. Then it develops the psychological multimedia information intelligence system to gather and analyses information of table tennis players. It also carries on the training, competition and case analysis to inspect and obtain a good response.",
"fno": "6850a712",
"keywords": [
"Psychology",
"Games",
"Knowledge Acquisition",
"Multimedia Communication",
"Data Mining",
"Streaming Media",
"Knowledge Acquisition",
"Table Tennis Players",
"Psychological Perception Representation"
],
"authors": [
{
"affiliation": null,
"fullName": "Xinhe Gong",
"givenName": "Xinhe",
"surname": "Gong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Peiliang Ling",
"givenName": "Peiliang",
"surname": "Ling",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xianming Meng",
"givenName": "Xianming",
"surname": "Meng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icisce",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-04-01T00:00:00",
"pubType": "proceedings",
"pages": "712-715",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-6850-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6850a707",
"articleId": "12OmNBO3KdW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6850a716",
"articleId": "12OmNBajTKi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/csie/2009/3507/4/3507d596",
"title": "Discussion on Psychological Decision Support System of Table Tennis Players Based on Multi-agent System",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507d596/12OmNwogh9M",
"parentPublication": {
"id": "proceedings/csie/2009/3507/4",
"title": "Computer Science and Information Engineering, World Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2013/0703/0/06636679",
"title": "An automatic system for sports analytics in multi-camera tennis videos",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2013/06636679/12OmNxVDuOl",
"parentPublication": {
"id": "proceedings/avss/2013/0703/0",
"title": "2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571164",
"title": "Real-Time Immersive Table Tennis Game for Two Players with Motion Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571164/12OmNyeECD6",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017600",
"title": "iTTVis: Interactive Visualization of Table Tennis Data",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017600/13rRUyY28YD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807264",
"title": "Tac-Simur: Tactic-based Simulative Visual Analytics of Table Tennis",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807264/1cG6vo24hRC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2018/6956/0/695600a325",
"title": "Experimental Research on Applying Imagery Training Method in Teenager Table Tennis Training Classes",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2018/695600a325/1dUo2B83mqk",
"parentPublication": {
"id": "proceedings/icnisc/2018/6956/0",
"title": "2018 4th Annual International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2021/3892/0/389200a632",
"title": "Tactical Decision System of Table Tennis Match based on C4.5 Decision Tree",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2021/389200a632/1t2nmIZ5RBe",
"parentPublication": {
"id": "proceedings/icmtma/2021/3892/0",
"title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09446582",
"title": "Performance Improvement and Skill Transfer in Table Tennis Through Training in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09446582/1u8lz4qWghi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tcs/2021/2910/0/291000a533",
"title": "A Study of Liu Shiwen’s Table Tennis Techniques and Tactics Based on Computer-aided Video",
"doi": null,
"abstractUrl": "/proceedings-article/tcs/2021/291000a533/1wRIl8gP8xW",
"parentPublication": {
"id": "proceedings/tcs/2021/2910/0",
"title": "2021 International Conference on Information Technology and Contemporary Sports (TCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900e571",
"title": "Table Tennis Stroke Recognition Using Two-Dimensional Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900e571/1yJYs5Lr65W",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNz61dBt",
"title": "2010 14th International Conference Information Visualisation",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyeECD6",
"doi": "10.1109/IV.2010.97",
"title": "Real-Time Immersive Table Tennis Game for Two Players with Motion Tracking",
"normalizedTitle": "Real-Time Immersive Table Tennis Game for Two Players with Motion Tracking",
"abstract": "Presented in this paper is a novel real-time virtual reality game developed to enable two participants to play table tennis immersively with each other's avatar in a shared virtual environment. It uses a wireless hybrid inertial and ultrasonic tracking system to provide the positions and orientations of both the head (view point) and hand (racket) of each player, as well as two large rear-projection stereoscopic screens to provide a view-dependent 3D display of the game environment. Additionally, a physics-based ball animation model is designed for the game, which includes fast detection of the ball colliding with table, net and quick moving rackets. The system is shown to offer some unique features and form a good platform for development of other immersive games for multiple players.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presented in this paper is a novel real-time virtual reality game developed to enable two participants to play table tennis immersively with each other's avatar in a shared virtual environment. It uses a wireless hybrid inertial and ultrasonic tracking system to provide the positions and orientations of both the head (view point) and hand (racket) of each player, as well as two large rear-projection stereoscopic screens to provide a view-dependent 3D display of the game environment. Additionally, a physics-based ball animation model is designed for the game, which includes fast detection of the ball colliding with table, net and quick moving rackets. The system is shown to offer some unique features and form a good platform for development of other immersive games for multiple players.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presented in this paper is a novel real-time virtual reality game developed to enable two participants to play table tennis immersively with each other's avatar in a shared virtual environment. It uses a wireless hybrid inertial and ultrasonic tracking system to provide the positions and orientations of both the head (view point) and hand (racket) of each player, as well as two large rear-projection stereoscopic screens to provide a view-dependent 3D display of the game environment. Additionally, a physics-based ball animation model is designed for the game, which includes fast detection of the ball colliding with table, net and quick moving rackets. The system is shown to offer some unique features and form a good platform for development of other immersive games for multiple players.",
"fno": "05571164",
"keywords": [
"Avatars",
"Computer Animation",
"Computer Games",
"Target Tracking",
"Real Time Immersive Table Tennis Game",
"Motion Tracking",
"Virtual Reality Game",
"Shared Virtual Environment",
"Wireless Hybrid Inertial System",
"Ultrasonic Tracking System",
"Rear Projection Stereoscopic Screens",
"View Dependent 3 D Display",
"Physics Based Ball Animation Model",
"Multiple Players",
"Avatar",
"Games",
"Tracking",
"Wireless Communication",
"Servers",
"Avatars",
"Acoustics",
"Sensors",
"Immersive Game",
"Motion Tracking",
"Stereoscopic Display",
"Interaction Techniques"
],
"authors": [
{
"affiliation": null,
"fullName": "Yingzhu Li",
"givenName": "Yingzhu",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lik-Kwan Shark",
"givenName": "Lik-Kwan",
"surname": "Shark",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sarah Jane Hobbs",
"givenName": "Sarah Jane",
"surname": "Hobbs",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "James Ingham",
"givenName": "James",
"surname": "Ingham",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-07-01T00:00:00",
"pubType": "proceedings",
"pages": "500-505",
"year": "2010",
"issn": "1550-6037",
"isbn": "978-1-4244-7846-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05571172",
"articleId": "12OmNBubORF",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05571165",
"articleId": "12OmNqzu6Lr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2013/4990/0/4990b019",
"title": "Reconstruction of 3D Trajectories for Performance Analysis in Table Tennis",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2013/4990b019/12OmNAgoV7I",
"parentPublication": {
"id": "proceedings/cvprw/2013/4990/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/digitel/2007/2801/0/04148849",
"title": "Chatterer Ping-pong Table",
"doi": null,
"abstractUrl": "/proceedings-article/digitel/2007/04148849/12OmNqGA5gt",
"parentPublication": {
"id": "proceedings/digitel/2007/2801/0",
"title": "2007 IEEE International Workshop on Digital Games and Intelligent Toys-based Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2017/3876/0/387601a396",
"title": "Personalized Table-Top Game Recommendations",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2017/387601a396/12OmNxdDFHz",
"parentPublication": {
"id": "proceedings/ictai/2017/3876/0",
"title": "2017 IEEE 29th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/1997/7825/0/78250189",
"title": "Sensor Based Synthetic Actors in a Tennis Game Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/1997/78250189/12OmNySosKv",
"parentPublication": {
"id": "proceedings/cgi/1997/7825/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciip/2015/0148/0/07414772",
"title": "Automated ball tracking in tennis videos",
"doi": null,
"abstractUrl": "/proceedings-article/iciip/2015/07414772/12OmNynJMRW",
"parentPublication": {
"id": "proceedings/iciip/2015/0148/0",
"title": "2015 Third International Conference on Image Information Processing (ICIIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/01/v0015",
"title": "Real-Time Interaction with a Humanoid Avatar in an Immersive Table Tennis Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2007/01/v0015/13rRUxOdD2y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2006/04/mcg2006040010",
"title": "V-Pong: An Immersive Table Tennis Simulation",
"doi": null,
"abstractUrl": "/magazine/cg/2006/04/mcg2006040010/13rRUxZ0o3V",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2019/9245/0/924500a189",
"title": "Markerless Racket Pose Detection and Stroke Classification Based on Stereo Vision for Table Tennis Robots",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2019/924500a189/18M7ddCK7o4",
"parentPublication": {
"id": "proceedings/irc/2019/9245/0",
"title": "2019 Third IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2019/5712/0/09107693",
"title": "Kinect Based Virtual Referee For Table Tennis Game: TTV (Table Tennis Var System)",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2019/09107693/1koLtmmbCGQ",
"parentPublication": {
"id": "proceedings/icisce/2019/5712/0",
"title": "2019 6th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150877",
"title": "TTNet: Real-time temporal and spatial video analysis of table tennis",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150877/1lPHnyPws1y",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJd0JOwO9a",
"doi": "10.1109/VRW55335.2022.00253",
"title": "Assist Home Training Table Tennis Skill Acquisition via Immersive Learning and Web Technologies",
"normalizedTitle": "Assist Home Training Table Tennis Skill Acquisition via Immersive Learning and Web Technologies",
"abstract": "Sports applications in Virtual Reality (VR) have become immensely popular for training skill-based sports like table tennis. However, the existing researches do not focus on designing an intuitive system for efficient communication between the trainee and the coach. We developed a VR table tennis training system for table tennis skill acquisition that focuses on helping coaches to convey a trainee's mistake clearly. Our system consists of a VR training system where trainees can learn a skill gradually and a web-based feedback annotative tool for coaches. Trainees can examine their mistakes through a tablet or an immersive VR world.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Sports applications in Virtual Reality (VR) have become immensely popular for training skill-based sports like table tennis. However, the existing researches do not focus on designing an intuitive system for efficient communication between the trainee and the coach. We developed a VR table tennis training system for table tennis skill acquisition that focuses on helping coaches to convey a trainee's mistake clearly. Our system consists of a VR training system where trainees can learn a skill gradually and a web-based feedback annotative tool for coaches. Trainees can examine their mistakes through a tablet or an immersive VR world.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Sports applications in Virtual Reality (VR) have become immensely popular for training skill-based sports like table tennis. However, the existing researches do not focus on designing an intuitive system for efficient communication between the trainee and the coach. We developed a VR table tennis training system for table tennis skill acquisition that focuses on helping coaches to convey a trainee's mistake clearly. Our system consists of a VR training system where trainees can learn a skill gradually and a web-based feedback annotative tool for coaches. Trainees can examine their mistakes through a tablet or an immersive VR world.",
"fno": "840200a804",
"keywords": [
"Computer Based Training",
"Computer Games",
"Human Computer Interaction",
"Sport",
"Virtual Reality",
"Assist Home Training Table Tennis Skill Acquisition",
"Immersive Learning",
"Web Technologies",
"Sports Applications",
"Virtual Reality",
"Training Skill Based Sports",
"Intuitive System",
"Trainee",
"Coach",
"VR Table Tennis Training System",
"VR Training System",
"Web Based Feedback Annotative Tool",
"Immersive VR World",
"Training",
"Three Dimensional Displays",
"Design Methodology",
"Conferences",
"Virtual Reality",
"User Interfaces",
"Sports",
"Human Centered Computing X 2014 Visualization X 2014 Visualization Systems And Tools X 2014 Visualization Toolkits",
"Interaction Design X 2014 Interaction Design Process And Methods X 2014 Activity Centered Design"
],
"authors": [
{
"affiliation": "National Tsing Hua University,Taiwan",
"fullName": "Jian-Jia Weng",
"givenName": "Jian-Jia",
"surname": "Weng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Taiwan",
"fullName": "Yu-Hsin Wang",
"givenName": "Yu-Hsin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Taiwan",
"fullName": "Calvin Ku",
"givenName": "Calvin",
"surname": "Ku",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Taiwan",
"fullName": "Dong-Xian Wu",
"givenName": "Dong-Xian",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Taiwan",
"fullName": "Yi-Min Lau",
"givenName": "Yi-Min",
"surname": "Lau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Taiwan",
"fullName": "Wan-Lun Tsai",
"givenName": "Wan-Lun",
"surname": "Tsai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Taiwan",
"fullName": "Tse-Yu Pan",
"givenName": "Tse-Yu",
"surname": "Pan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Taiwan",
"fullName": "Min-Chun Hu",
"givenName": "Min-Chun",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Taiwan",
"fullName": "Hung-Kuo Chu",
"givenName": "Hung-Kuo",
"surname": "Chu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Taiwan",
"fullName": "Te-Cheng Wu",
"givenName": "Te-Cheng",
"surname": "Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "804-805",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a802",
"articleId": "1CJfq7DQm76",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a806",
"articleId": "1CJcNcP5uEg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icise-ie/2021/3829/0/382900b210",
"title": "Application of Micro-lecture in Table Tennis Teaching for Children",
"doi": null,
"abstractUrl": "/proceedings-article/icise-ie/2021/382900b210/1C8GamvUKGI",
"parentPublication": {
"id": "proceedings/icise-ie/2021/3829/0",
"title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a275",
"title": "Table Tennis Skill Learning in VR with Step by Step Guides using Forehand Drive as a Case Study",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a275/1KmFgOCBg1a",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798309",
"title": "Improve the Decision-making Skill of Basketball Players by an Action-aware VR Training System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798309/1cJ1adrlIoo",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08795584",
"title": "CourtTime: Generating Actionable Insights into Tennis Matches Using Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08795584/1csHUeq7TB6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382892",
"title": "SPinPong - Virtual Reality Table Tennis Skill Acquisition using Visual, Haptic and Temporal Cues",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382892/1saZrRoiA3C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/09411869",
"title": "Tac-Miner: Visual Tactic Mining for Multiple Table Tennis Matches",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/09411869/1t2ii7r7RcI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2021/3892/0/389200a632",
"title": "Tactical Decision System of Table Tennis Match based on C4.5 Decision Tree",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2021/389200a632/1t2nmIZ5RBe",
"parentPublication": {
"id": "proceedings/icmtma/2021/3892/0",
"title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09446582",
"title": "Performance Improvement and Skill Transfer in Table Tennis Through Training in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09446582/1u8lz4qWghi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tcs/2021/2910/0/291000a533",
"title": "A Study of Liu Shiwen’s Table Tennis Techniques and Tactics Based on Computer-aided Video",
"doi": null,
"abstractUrl": "/proceedings-article/tcs/2021/291000a533/1wRIl8gP8xW",
"parentPublication": {
"id": "proceedings/tcs/2021/2910/0",
"title": "2021 International Conference on Information Technology and Contemporary Sports (TCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09626557",
"title": "SimuExplorer: Visual Exploration of Game Simulation in Table Tennis",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09626557/1yNd5vlQLrG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1lPGXn8hEiI",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1lPHnyPws1y",
"doi": "10.1109/CVPRW50498.2020.00450",
"title": "TTNet: Real-time temporal and spatial video analysis of table tennis",
"normalizedTitle": "TTNet: Real-time temporal and spatial video analysis of table tennis",
"abstract": "We present a neural network TTNet aimed at real-time processing of high-resolution table tennis videos, providing both temporal (events spotting) and spatial (ball detection and semantic segmentation) data. This approach gives core information for reasoning score updates by an auto-referee system.We also publish a multi-task dataset OpenTTGames with videos of table tennis games in 120 fps labeled with events, semantic segmentation masks, and ball coordinates for evaluation of multi-task approaches, primarily oriented on spotting of quick events and small objects tracking. TTNet demonstrated 97.0% accuracy in game events spotting along with 2 pixels RMSE in ball detection with 97.5% accuracy on the test part of the presented dataset.The proposed network allows the processing of downscaled full HD videos with inference time below 6 ms per input tensor on a machine with a single consumer-grade GPU. Thus, we are contributing to the development of real-time multi-task deep learning applications and presenting approach, which is potentially capable of substituting manual data collection by sports scouts, providing support for referees' decision-making, and gathering extra information about the game process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a neural network TTNet aimed at real-time processing of high-resolution table tennis videos, providing both temporal (events spotting) and spatial (ball detection and semantic segmentation) data. This approach gives core information for reasoning score updates by an auto-referee system.We also publish a multi-task dataset OpenTTGames with videos of table tennis games in 120 fps labeled with events, semantic segmentation masks, and ball coordinates for evaluation of multi-task approaches, primarily oriented on spotting of quick events and small objects tracking. TTNet demonstrated 97.0% accuracy in game events spotting along with 2 pixels RMSE in ball detection with 97.5% accuracy on the test part of the presented dataset.The proposed network allows the processing of downscaled full HD videos with inference time below 6 ms per input tensor on a machine with a single consumer-grade GPU. Thus, we are contributing to the development of real-time multi-task deep learning applications and presenting approach, which is potentially capable of substituting manual data collection by sports scouts, providing support for referees' decision-making, and gathering extra information about the game process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a neural network TTNet aimed at real-time processing of high-resolution table tennis videos, providing both temporal (events spotting) and spatial (ball detection and semantic segmentation) data. This approach gives core information for reasoning score updates by an auto-referee system.We also publish a multi-task dataset OpenTTGames with videos of table tennis games in 120 fps labeled with events, semantic segmentation masks, and ball coordinates for evaluation of multi-task approaches, primarily oriented on spotting of quick events and small objects tracking. TTNet demonstrated 97.0% accuracy in game events spotting along with 2 pixels RMSE in ball detection with 97.5% accuracy on the test part of the presented dataset.The proposed network allows the processing of downscaled full HD videos with inference time below 6 ms per input tensor on a machine with a single consumer-grade GPU. Thus, we are contributing to the development of real-time multi-task deep learning applications and presenting approach, which is potentially capable of substituting manual data collection by sports scouts, providing support for referees' decision-making, and gathering extra information about the game process.",
"fno": "09150877",
"keywords": [
"Image Segmentation",
"Inference Mechanisms",
"Learning Artificial Intelligence",
"Neural Nets",
"Object Detection",
"Object Tracking",
"Sport",
"Video Signal Processing",
"Spatial Video Analysis",
"Real Time Processing",
"High Resolution Table Tennis Videos",
"Ball Detection",
"Core Information",
"Reasoning Score Updates",
"Auto Referee System",
"Multitask Dataset Open TT Games",
"Table Tennis Games",
"Semantic Segmentation Masks",
"Ball Coordinates",
"Objects Tracking",
"Game Events Spotting",
"Downscaled Full HD Videos",
"Inference Time",
"Single Consumer Grade GPU",
"Real Time Multitask Deep Learning Applications",
"Manual Data Collection",
"Game Process",
"TT Net Neural Network",
"Games",
"Task Analysis",
"Streaming Media",
"Semantics",
"Computer Architecture",
"Real Time Systems"
],
"authors": [
{
"affiliation": "OSAI",
"fullName": "Roman Voeikov",
"givenName": "Roman",
"surname": "Voeikov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "OSAI",
"fullName": "Nikolay Falaleev",
"givenName": "Nikolay",
"surname": "Falaleev",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "OSAI",
"fullName": "Ruslan Baikulov",
"givenName": "Ruslan",
"surname": "Baikulov",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "3866-3874",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9360-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09150981",
"articleId": "1lPHv2kMgBa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09150722",
"articleId": "1lPHdbkSzLy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2013/4990/0/4990b019",
"title": "Reconstruction of 3D Trajectories for Performance Analysis in Table Tennis",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2013/4990b019/12OmNAgoV7I",
"parentPublication": {
"id": "proceedings/cvprw/2013/4990/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607725",
"title": "Event detection in tennis matches based on video data mining",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607725/12OmNqGRGkL",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmm/2005/2164/0/21640102",
"title": "Analyzing Tennis Tactics from Broadcasting Tennis Video Clips",
"doi": null,
"abstractUrl": "/proceedings-article/mmm/2005/21640102/12OmNy50gcj",
"parentPublication": {
"id": "proceedings/mmm/2005/2164/0",
"title": "Multi-Media Modeling Conference, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2015/6850/0/6850a712",
"title": "Discussion on Psychological Perception Representation and Knowledge Acquisition of Table Tennis Players",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2015/6850a712/12OmNyL0Tq5",
"parentPublication": {
"id": "proceedings/icisce/2015/6850/0",
"title": "2015 2nd International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciip/2015/0148/0/07414772",
"title": "Automated ball tracking in tennis videos",
"doi": null,
"abstractUrl": "/proceedings-article/iciip/2015/07414772/12OmNynJMRW",
"parentPublication": {
"id": "proceedings/iciip/2015/0148/0",
"title": "2015 Third International Conference on Image Information Processing (ICIIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876044",
"title": "TenniVis: Visualization for Tennis Match Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876044/13rRUwI5Ugd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017600",
"title": "iTTVis: Interactive Visualization of Table Tennis Data",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017600/13rRUyY28YD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2018/2666/1/266601a783",
"title": "Painless Tennis Ball Tracking System",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2018/266601a783/144U9aHWXpJ",
"parentPublication": {
"id": "proceedings/compsac/2018/2666/2",
"title": "2018 IEEE 42nd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150914",
"title": "Improved Soccer Action Spotting using both Audio and Video Streams",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150914/1lPH1Jfjvt6",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceccs/2020/8558/0/855800a053",
"title": "Deep Learning Application in Broadcast Tennis Video Annotation",
"doi": null,
"abstractUrl": "/proceedings-article/iceccs/2020/855800a053/1s658VbjRjq",
"parentPublication": {
"id": "proceedings/iceccs/2020/8558/0",
"title": "2020 25th International Conference on Engineering of Complex Computer Systems (ICECCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAYXWAF",
"title": "2016 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwpGgKa",
"doi": "10.1109/VR.2016.7504727",
"title": "Head mounted projection for enhanced gaze in social interactions",
"normalizedTitle": "Head mounted projection for enhanced gaze in social interactions",
"abstract": "Projected displays can present life-sized imagery of a virtual human character that can be seen by multiple observers. However, typical projected displays can only render that virtual human from a single viewpoint, regardless of whether head tracking is employed. This results in the virtual human being rendered from an incorrect perspective for most individuals. This could cause perceptual miscues, such as the “Mona Lisa” effect, causing the virtual human to appear as if it is simultaneously gazing and pointing at all observers regardless of their location. This may be detrimental to training scenarios in which all trainees must accurately assess where the virtual human is looking or pointing. We discuss our investigations into the presentation of eye gaze using REFLCT, a previously introduced head mounted projective display. REFLCT uses head tracked, head mounted projectors and retroreflective screens to present personalized, perspective correct imagery to multiple users without the occlusion of a traditional head mounted display. We examined how head mounted projection for enhanced presentation of eye gaze might facilitate or otherwise affect social interactions during a multi-person guessing game of “Twenty Questions.”",
"abstracts": [
{
"abstractType": "Regular",
"content": "Projected displays can present life-sized imagery of a virtual human character that can be seen by multiple observers. However, typical projected displays can only render that virtual human from a single viewpoint, regardless of whether head tracking is employed. This results in the virtual human being rendered from an incorrect perspective for most individuals. This could cause perceptual miscues, such as the “Mona Lisa” effect, causing the virtual human to appear as if it is simultaneously gazing and pointing at all observers regardless of their location. This may be detrimental to training scenarios in which all trainees must accurately assess where the virtual human is looking or pointing. We discuss our investigations into the presentation of eye gaze using REFLCT, a previously introduced head mounted projective display. REFLCT uses head tracked, head mounted projectors and retroreflective screens to present personalized, perspective correct imagery to multiple users without the occlusion of a traditional head mounted display. We examined how head mounted projection for enhanced presentation of eye gaze might facilitate or otherwise affect social interactions during a multi-person guessing game of “Twenty Questions.”",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Projected displays can present life-sized imagery of a virtual human character that can be seen by multiple observers. However, typical projected displays can only render that virtual human from a single viewpoint, regardless of whether head tracking is employed. This results in the virtual human being rendered from an incorrect perspective for most individuals. This could cause perceptual miscues, such as the “Mona Lisa” effect, causing the virtual human to appear as if it is simultaneously gazing and pointing at all observers regardless of their location. This may be detrimental to training scenarios in which all trainees must accurately assess where the virtual human is looking or pointing. We discuss our investigations into the presentation of eye gaze using REFLCT, a previously introduced head mounted projective display. REFLCT uses head tracked, head mounted projectors and retroreflective screens to present personalized, perspective correct imagery to multiple users without the occlusion of a traditional head mounted display. We examined how head mounted projection for enhanced presentation of eye gaze might facilitate or otherwise affect social interactions during a multi-person guessing game of “Twenty Questions.”",
"fno": "07504727",
"keywords": [
"Head",
"Optical Imaging",
"Observers",
"Games",
"Adaptive Optics",
"Virtual Reality",
"Training",
"B 4 2 Input Output And Data Communications Input Output Devices Image Display",
"H 5 1 Information Interfaces And Presentation I 7 Multimedia Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "David M. Krum",
"givenName": "David M.",
"surname": "Krum",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Sin-Hwa Kang",
"givenName": "Sin-Hwa",
"surname": "Kang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Thai Phan",
"givenName": "Thai",
"surname": "Phan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google, Inc.",
"fullName": "Lauren Cairco Dukes",
"givenName": "Lauren Cairco",
"surname": "Dukes",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "Mark Bolas",
"givenName": "Mark",
"surname": "Bolas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "209-210",
"year": "2016",
"issn": "2375-5334",
"isbn": "978-1-5090-0836-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07504726",
"articleId": "12OmNBOCWvM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07504728",
"articleId": "12OmNzRHOQ3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504755",
"title": "Spatial consistency perception in optical and video see-through head-mounted augmentations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504755/12OmNqNXEli",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223450",
"title": "Evaluating optical see-through head-mounted display calibration via frustum visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223450/12OmNrAv3Ap",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2000/0478/0/04780233",
"title": "Visuo-Haptic Display Using Head-Mounted Projector",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2000/04780233/12OmNwHz00K",
"parentPublication": {
"id": "proceedings/vr/2000/0478/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836487",
"title": "Reduction of Interaction Space in Single Point Active Alignment Method for Optical See-Through Head-Mounted Display Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836487/12OmNyRg4AG",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2011/2183/0/06162910",
"title": "An empiric evaluation of confirmation methods for optical see-through head-mounted display calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2011/06162910/12OmNzwpUfP",
"parentPublication": {
"id": "proceedings/ismar/2011/2183/0",
"title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040701",
"title": "Peripheral Stimulation and its Effect on Perceived Spatial Scale in Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040701/13rRUx0xPmZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a544",
"title": "Taming Cyclops: Mixed Reality Head-Mounted Displays as Laser Safety Goggles for Advanced Optics Laboratories",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a544/1CJcMmE19cY",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a461",
"title": "Analysis of Viewing Behaviors in a Head-Mounted Virtual Geographic Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a461/1ap5zEzl44M",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2020/6497/0/649700a041",
"title": "Differences in the Uncanny Valley between Head-Mounted Displays and Monitors",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2020/649700a041/1olHyEEy8CY",
"parentPublication": {
"id": "proceedings/cw/2020/6497/0",
"title": "2020 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwwMf3H",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismarw",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyRg4AG",
"doi": "10.1109/ISMAR-Adjunct.2016.0066",
"title": "Reduction of Interaction Space in Single Point Active Alignment Method for Optical See-Through Head-Mounted Display Calibration",
"normalizedTitle": "Reduction of Interaction Space in Single Point Active Alignment Method for Optical See-Through Head-Mounted Display Calibration",
"abstract": "With users always involved in the calibration of optical see-through head-mounted displays, the accuracy of calibration is subject to human-related errors, for example, postural sway, an unstable input medium, and fatigue. In this paper we propose a new calibration approach: Fixed-head 2 degree-of-freedom (DOF) interaction for Single Point Active Alignment Method (SPAAM) reduces the interaction space from a typical 6 DOF head motion to a 2 DOF cursor position on the semi-transparent screen. It uses a mouse as input medium, which is more intuitive and stable, and reduces user fatigue by simplifying and speeding up the calibration procedure. A multi-user study confirmed the significant reduction of human-related error by comparing our novel fixed-head 2 DOF interaction to the traditional interaction methods for SPAAM.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With users always involved in the calibration of optical see-through head-mounted displays, the accuracy of calibration is subject to human-related errors, for example, postural sway, an unstable input medium, and fatigue. In this paper we propose a new calibration approach: Fixed-head 2 degree-of-freedom (DOF) interaction for Single Point Active Alignment Method (SPAAM) reduces the interaction space from a typical 6 DOF head motion to a 2 DOF cursor position on the semi-transparent screen. It uses a mouse as input medium, which is more intuitive and stable, and reduces user fatigue by simplifying and speeding up the calibration procedure. A multi-user study confirmed the significant reduction of human-related error by comparing our novel fixed-head 2 DOF interaction to the traditional interaction methods for SPAAM.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With users always involved in the calibration of optical see-through head-mounted displays, the accuracy of calibration is subject to human-related errors, for example, postural sway, an unstable input medium, and fatigue. In this paper we propose a new calibration approach: Fixed-head 2 degree-of-freedom (DOF) interaction for Single Point Active Alignment Method (SPAAM) reduces the interaction space from a typical 6 DOF head motion to a 2 DOF cursor position on the semi-transparent screen. It uses a mouse as input medium, which is more intuitive and stable, and reduces user fatigue by simplifying and speeding up the calibration procedure. A multi-user study confirmed the significant reduction of human-related error by comparing our novel fixed-head 2 DOF interaction to the traditional interaction methods for SPAAM.",
"fno": "07836487",
"keywords": [
"Augmented Reality",
"Calibration",
"Helmet Mounted Displays",
"Human Factors",
"Mouse Controllers Computers",
"Interaction Space Reduction",
"Single Point Active Alignment Method",
"Optical See Through Head Mounted Display Calibration",
"Human Related Errors",
"Fixed Head 2 Degree Of Freedom Interaction",
"DOF Interaction",
"SPAAM",
"6 DOF Head Motion",
"2 DOF Cursor Position",
"Semitransparent Screen",
"Mouse",
"User Fatigue Reduction",
"Augmented Reality",
"Calibration",
"Augmented Reality",
"Head",
"Adaptive Optics",
"Three Dimensional Displays",
"Optical Imaging",
"Mice",
"Augmented Reality",
"SPAAM",
"OST HMD Calibration And Human Factors"
],
"authors": [
{
"affiliation": null,
"fullName": "Long Qian",
"givenName": "Long",
"surname": "Qian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Alexander Winkler",
"givenName": "Alexander",
"surname": "Winkler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bernhard Fuerst",
"givenName": "Bernhard",
"surname": "Fuerst",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Peter Kazanzides",
"givenName": "Peter",
"surname": "Kazanzides",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nassir Navab",
"givenName": "Nassir",
"surname": "Navab",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismarw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-09-01T00:00:00",
"pubType": "proceedings",
"pages": "156-157",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3740-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07836486",
"articleId": "12OmNx7XH8d",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07836488",
"articleId": "12OmNBVrji6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504693",
"title": "A calibration method for optical see-through head-mounted displays with a depth camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504693/12OmNAnMuMd",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948513",
"title": "Google glass, The META and Co. How to calibrate optical see-through head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948513/12OmNB8TUim",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223450",
"title": "Evaluating optical see-through head-mounted display calibration via frustum visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223450/12OmNrAv3Ap",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836486",
"title": "Modeling Physical Structure as Additional Constraints for Stereoscopic Optical See-Through Head-Mounted Display Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836486/12OmNx7XH8d",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892255",
"title": "Robust optical see-through head-mounted display calibration: Taking anisotropic nature of user interaction errors into account",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892255/12OmNxvO04e",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948424",
"title": "Performance and sensitivity analysis of INDICA: INteraction-Free DIsplay CAlibration for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948424/12OmNyYm2oO",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2011/2183/0/06162910",
"title": "An empiric evaluation of confirmation methods for optical see-through head-mounted display calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2011/06162910/12OmNzwpUfP",
"parentPublication": {
"id": "proceedings/ismar/2011/2183/0",
"title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446429",
"title": "Impact of Alignment Point Distance Distribution on SPAAM Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446429/13bd1gCd7Sz",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07021939",
"title": "Subjective Evaluation of a Semi-Automatic Optical See-Through Head-Mounted Display Calibration Technique",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07021939/13rRUwInvyB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a021",
"title": "Impact of Alignment Point Distance and Posture on SPAAM Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a021/17D45WaTkli",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyUWQR6",
"title": "Virtual Reality Annual International Symposium",
"acronym": "vrais",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "1993",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyrqzDT",
"doi": "10.1109/VRAIS.1993.380765",
"title": "Sparcchair: A one hundred million pixel display",
"normalizedTitle": "Sparcchair: A one hundred million pixel display",
"abstract": "The authors investigate whether a high-resolution head-mounted display, roving around a much larger frame buffer image, can give a user the impression of viewing a single very large display screen. A prototype is constructed, consisting of an 1120 /spl times/ 900 pixel head-mounted display, an ultrasonic head-tracker, a 16,386 /spl times/ 6,144 pixel frame buffer, and suitable X-window control software, as a means of studying this question. Applications can write to the large frame buffer using the window system, and the view can navigate around the image rapidly using head rotations. The prototype system, although somewhat awkward to use due to a limited field of view in the head-mounted display, shows that head rotation is a fast, convenient way to switch display contexts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The authors investigate whether a high-resolution head-mounted display, roving around a much larger frame buffer image, can give a user the impression of viewing a single very large display screen. A prototype is constructed, consisting of an 1120 /spl times/ 900 pixel head-mounted display, an ultrasonic head-tracker, a 16,386 /spl times/ 6,144 pixel frame buffer, and suitable X-window control software, as a means of studying this question. Applications can write to the large frame buffer using the window system, and the view can navigate around the image rapidly using head rotations. The prototype system, although somewhat awkward to use due to a limited field of view in the head-mounted display, shows that head rotation is a fast, convenient way to switch display contexts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The authors investigate whether a high-resolution head-mounted display, roving around a much larger frame buffer image, can give a user the impression of viewing a single very large display screen. A prototype is constructed, consisting of an 1120 /spl times/ 900 pixel head-mounted display, an ultrasonic head-tracker, a 16,386 /spl times/ 6,144 pixel frame buffer, and suitable X-window control software, as a means of studying this question. Applications can write to the large frame buffer using the window system, and the view can navigate around the image rapidly using head rotations. The prototype system, although somewhat awkward to use due to a limited field of view in the head-mounted display, shows that head rotation is a fast, convenient way to switch display contexts.",
"fno": "00380765",
"keywords": [
"Display Contexts",
"Sparcchair",
"High Resolution Head Mounted Display",
"Frame Buffer Image",
"Very Large Display Screen",
"Head Mounted Display",
"Ultrasonic Head Tracker",
"X Window Control Software",
"Window System",
"Head Rotations",
"Head Rotation"
],
"authors": [
{
"affiliation": "Sun Microsystems Lab., Inc., Chelmsford, MA, USA",
"fullName": "B.A. Reichlen",
"givenName": "B.A.",
"surname": "Reichlen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrais",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1993-09-01T00:00:00",
"pubType": "proceedings",
"pages": "300-307",
"year": "1993",
"issn": null,
"isbn": "0-7803-1363-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00380764",
"articleId": "12OmNzSQdnV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00380766",
"articleId": "12OmNvTjZUe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrkjVbP",
"title": "Web Information Systems Engineering, International Conference on",
"acronym": "wise",
"groupId": "1000812",
"volume": "1",
"displayVolume": "2",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBRbkst",
"doi": "10.1109/WISE.2000.882420",
"title": "Optional and Responsive Locking in Distributed Collaborative Object Graphics Editing Systems",
"normalizedTitle": "Optional and Responsive Locking in Distributed Collaborative Object Graphics Editing Systems",
"abstract": "Object-based collaborative graphics editing systems allow multiple users to edit the same graphics document at the same time from multiple sites. This paper examines the use of locking to prevent the generation of conflicting operations in this type of systems. Two types of locks are examined: object and region. A locking scheme, which preserves the intentions of all operations, is proposed. Furthermore, the problems of lock ownership caused by concurrent operations are resolved.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Object-based collaborative graphics editing systems allow multiple users to edit the same graphics document at the same time from multiple sites. This paper examines the use of locking to prevent the generation of conflicting operations in this type of systems. Two types of locks are examined: object and region. A locking scheme, which preserves the intentions of all operations, is proposed. Furthermore, the problems of lock ownership caused by concurrent operations are resolved.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Object-based collaborative graphics editing systems allow multiple users to edit the same graphics document at the same time from multiple sites. This paper examines the use of locking to prevent the generation of conflicting operations in this type of systems. Two types of locks are examined: object and region. A locking scheme, which preserves the intentions of all operations, is proposed. Furthermore, the problems of lock ownership caused by concurrent operations are resolved.",
"fno": "05771414",
"keywords": [
"Locking",
"Consistency Maintenance",
"Collaborative Editing",
"Graphics Editing",
"Distributed System"
],
"authors": [
{
"affiliation": "Griffith University",
"fullName": "David Chen",
"givenName": "David",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Griffith University",
"fullName": "Chengzheng Sun",
"givenName": "Chengzheng",
"surname": "Sun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wise",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-06-01T00:00:00",
"pubType": "proceedings",
"pages": "0414",
"year": "2000",
"issn": null,
"isbn": "0-7605-0577-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05771409",
"articleId": "12OmNyFCvQY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05771419",
"articleId": "12OmNwMXnt9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBSBk5H",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"acronym": "cadgraphics",
"groupId": "1001488",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBTJIN5",
"doi": "10.1109/CAD/Graphics.2011.58",
"title": "Tetrahedral Mesh Editing with Local Feature Manipulations",
"normalizedTitle": "Tetrahedral Mesh Editing with Local Feature Manipulations",
"abstract": "Volumetric mesh models are widely used nowadays in areas like product quality evaluation, physically-based deformation, numerical simulation, and so on. In the application of Computer Aided Design (CAD) and Computer Aided Engineering (CAE) integration, providing a method to directly manipulate mesh models can reduce much effort in the simulation process. However, this integration is limited or only provided from CAD to CAE at current time. In this paper, we propose a framework for volumetric mesh editing based on feature dimensions. In our framework, the volumetric mesh is first decomposed into volumetric features based on corresponding surface features. Then, random-walk based interpolation algorithm is applied to manipulation of local features. Our framework allows users to change the dimensions of the recognized features on the volumetric mesh directly. Then optimization, which is limited to local features, is employed to refine the tetrahedrons of the volumetric features once the quality of the elements does not meet the requirements. Experimental results show that the features hold precisely after the editing operations. Additionally, the quality of the elements keeps well after the tetrahedral mesh has been edited.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Volumetric mesh models are widely used nowadays in areas like product quality evaluation, physically-based deformation, numerical simulation, and so on. In the application of Computer Aided Design (CAD) and Computer Aided Engineering (CAE) integration, providing a method to directly manipulate mesh models can reduce much effort in the simulation process. However, this integration is limited or only provided from CAD to CAE at current time. In this paper, we propose a framework for volumetric mesh editing based on feature dimensions. In our framework, the volumetric mesh is first decomposed into volumetric features based on corresponding surface features. Then, random-walk based interpolation algorithm is applied to manipulation of local features. Our framework allows users to change the dimensions of the recognized features on the volumetric mesh directly. Then optimization, which is limited to local features, is employed to refine the tetrahedrons of the volumetric features once the quality of the elements does not meet the requirements. Experimental results show that the features hold precisely after the editing operations. Additionally, the quality of the elements keeps well after the tetrahedral mesh has been edited.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Volumetric mesh models are widely used nowadays in areas like product quality evaluation, physically-based deformation, numerical simulation, and so on. In the application of Computer Aided Design (CAD) and Computer Aided Engineering (CAE) integration, providing a method to directly manipulate mesh models can reduce much effort in the simulation process. However, this integration is limited or only provided from CAD to CAE at current time. In this paper, we propose a framework for volumetric mesh editing based on feature dimensions. In our framework, the volumetric mesh is first decomposed into volumetric features based on corresponding surface features. Then, random-walk based interpolation algorithm is applied to manipulation of local features. Our framework allows users to change the dimensions of the recognized features on the volumetric mesh directly. Then optimization, which is limited to local features, is employed to refine the tetrahedrons of the volumetric features once the quality of the elements does not meet the requirements. Experimental results show that the features hold precisely after the editing operations. Additionally, the quality of the elements keeps well after the tetrahedral mesh has been edited.",
"fno": "4497a130",
"keywords": [
"CAD CAE Integration",
"Volumetric Mesh",
"Mesh Segmentation",
"Mesh Editing",
"Feature Preserved"
],
"authors": [
{
"affiliation": null,
"fullName": "Chuhua Xian",
"givenName": "Chuhua",
"surname": "Xian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shuming Gao",
"givenName": "Shuming",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tianming Zhang",
"givenName": "Tianming",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cadgraphics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-09-01T00:00:00",
"pubType": "proceedings",
"pages": "130-137",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4497-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4497a124",
"articleId": "12OmNzn38YO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4497a141",
"articleId": "12OmNzt0IBX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cscwd/2005/0002/2/01504189",
"title": "Brep model simplification for feature suppressing using local error evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/cscwd/2005/01504189/12OmNBqdrdA",
"parentPublication": {
"id": "proceedings/cscwd/2005/0002/2",
"title": "International Conference on Computer Supported Cooperative Work in Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2007/3009/0/30090461",
"title": "Simple and Efficient Mesh Editing with Consistent Local Frames",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2007/30090461/12OmNscfHYx",
"parentPublication": {
"id": "proceedings/pg/2007/3009/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvgip/2008/3476/0/3476a055",
"title": "Multi-scale Method for Adaptive Mesh Editing Based on Rigidity Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icvgip/2008/3476a055/12OmNwx3Qao",
"parentPublication": {
"id": "proceedings/icvgip/2008/3476/0",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a053",
"title": "Finite Element Mesh Editing through CAD Operations",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a053/12OmNxvwoVJ",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2002/7498/0/7498chopra",
"title": "TetFusion: An Algorithm For Rapid Tetrahedral Mesh Simplification",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2002/7498chopra/12OmNyQphf1",
"parentPublication": {
"id": "proceedings/ieee-vis/2002/7498/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvmp/2011/4621/0/4621a148",
"title": "Space-time Editing of 3D Video Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/cvmp/2011/4621a148/12OmNzGDsMm",
"parentPublication": {
"id": "proceedings/cvmp/2011/4621/0",
"title": "2011 Conference for Visual Media Production",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2004/8788/0/87880433",
"title": "TetSplat Real-Time Rendering and Volume Clipping of Large Unstructured Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2004/87880433/12OmNzRHOOj",
"parentPublication": {
"id": "proceedings/ieee-vis/2004/8788/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06814999",
"title": "Semantic Cage Generation for FE Mesh Editing",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06814999/12OmNzcPAd7",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2012/4829/0/4829a198",
"title": "Representing and Manipulating Mesh-Based Character Animations",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2012/4829a198/12OmNznkKdG",
"parentPublication": {
"id": "proceedings/sibgrapi/2012/4829/0",
"title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/01/v0034",
"title": "Robust Feature Classification and Editing",
"doi": null,
"abstractUrl": "/journal/tg/2007/01/v0034/13rRUxBa5nj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrAdstw",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"acronym": "icccnt",
"groupId": "1802177",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqIhFMQ",
"doi": "10.1109/ICCCNT.2013.6726673",
"title": "A novel boundary approach for shape representation and classification",
"normalizedTitle": "A novel boundary approach for shape representation and classification",
"abstract": "Shape is an important visual feature and it is one of the basic features used to describe image content. However, shape representation and classification is a difficult task. This paper presents a new boundary based shape representation and classification algorithm based on mathematical morphology. It consists of two steps. Firstly, an input shape is represented by using Hit Miss Transform (HMT) into a set of structuring elements. Secondly, the extracted shape of the image is classified based on shape features. Experimental results show that the integration of these strategies significantly improves shape database.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Shape is an important visual feature and it is one of the basic features used to describe image content. However, shape representation and classification is a difficult task. This paper presents a new boundary based shape representation and classification algorithm based on mathematical morphology. It consists of two steps. Firstly, an input shape is represented by using Hit Miss Transform (HMT) into a set of structuring elements. Secondly, the extracted shape of the image is classified based on shape features. Experimental results show that the integration of these strategies significantly improves shape database.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Shape is an important visual feature and it is one of the basic features used to describe image content. However, shape representation and classification is a difficult task. This paper presents a new boundary based shape representation and classification algorithm based on mathematical morphology. It consists of two steps. Firstly, an input shape is represented by using Hit Miss Transform (HMT) into a set of structuring elements. Secondly, the extracted shape of the image is classified based on shape features. Experimental results show that the integration of these strategies significantly improves shape database.",
"fno": "06726673",
"keywords": [
"Shape",
"Morphology",
"Feature Extraction",
"Transforms",
"Equations",
"Skeleton",
"Classification Algorithms",
"Shape Classification",
"Mathematical Morphology",
"Hit Miss Transform",
"Shape Representation"
],
"authors": [
{
"affiliation": null,
"fullName": "L. Sumalatha",
"givenName": "L.",
"surname": "Sumalatha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "B. Sujatha",
"givenName": "B.",
"surname": "Sujatha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "P. Sreekanth",
"givenName": "P.",
"surname": "Sreekanth",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icccnt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-4",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-3926-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06726672",
"articleId": "12OmNC3FG9k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06726674",
"articleId": "12OmNxdDFL6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/1988/9999/0/00196747",
"title": "Morphological skeleton representation and shape recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00196747/12OmNC4wtFC",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscid/2008/3311/2/3311b042",
"title": "Shape and Boundary Analysis for Classification of Breast Masses",
"doi": null,
"abstractUrl": "/proceedings-article/iscid/2008/3311b042/12OmNqAU6Ak",
"parentPublication": {
"id": "proceedings/iscid/2008/3311/2",
"title": "2008 International Symposium on Computational Intelligence and Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/2/01315145",
"title": "Shape representation and classification using the Poisson equation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315145/12OmNqBKU9B",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/2",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d537",
"title": "Statistical Shape Modeling Using Morphological Representations",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d537/12OmNrAdsyX",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsip/2014/5100/0/5100a218",
"title": "Shape Representation and Classification through Pattern Spectrum and Local Binary Pattern -- A Decision Level Fusion Approach",
"doi": null,
"abstractUrl": "/proceedings-article/icsip/2014/5100a218/12OmNvStczt",
"parentPublication": {
"id": "proceedings/icsip/2014/5100/0",
"title": "2014 Fifth International Conference on Signal and Image Processing (ICSIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150871",
"title": "Morphological shape representation",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150871/12OmNxIRxVS",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457679",
"title": "Integrating contour and skeleton for shape classification",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457679/12OmNxwWoAy",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2012/4683/0/4683a153",
"title": "Heat Kernels for Non-Rigid Shape Retrieval: Sparse Representation and Efficient Classification",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2012/4683a153/12OmNzVoBxV",
"parentPublication": {
"id": "proceedings/crv/2012/4683/0",
"title": "2012 Ninth Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1989/07/i0701",
"title": "Pattern Spectrum and Multiscale Shape Representation",
"doi": null,
"abstractUrl": "/journal/tp/1989/07/i0701/13rRUwInvg2",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150739",
"title": "A Topological Nomenclature for 3D Shape Analysis in Connectomics",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150739/1lPHt0H0q0E",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.