data
dict |
|---|
{
"proceeding": {
"id": "12OmNC2fGtw",
"title": "2013 First International Symposium on Computing and Networking - Across Practical Development and Theoretical Research (CANDAR)",
"acronym": "candar",
"groupId": "1803431",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvlg8k1",
"doi": "10.1109/CANDAR.2013.35",
"title": "ASCII Art Generation Using the Local Exhaustive Search on the GPU",
"normalizedTitle": "ASCII Art Generation Using the Local Exhaustive Search on the GPU",
"abstract": "An ASCII art is a matrix of characters that reproduces an original gray-scale image. It is commonly used to represent pseudo gray-scale images in text based messages. Since automatic generation of high quality ASCII art images is very hard, they are usually produced by hand. The main contribution of this paper is to propose a new technique to generate an ASCII art that reproduces the original tone and the details of an input gray-scale image. Our new technique is inspired by the local exhaustive search to optimize binary images for printing based on the characteristic of the human visual system. Although it can generate high quality ASCII art images, a lot of computing time is necessary for the local exhaustive search. Hence, we have implemented our new technique in a GPU to accelerate the computation. The experimental results shows that the GPU implementation can achieve a speedup factor up to 57.1 over the conventional CPU implementation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An ASCII art is a matrix of characters that reproduces an original gray-scale image. It is commonly used to represent pseudo gray-scale images in text based messages. Since automatic generation of high quality ASCII art images is very hard, they are usually produced by hand. The main contribution of this paper is to propose a new technique to generate an ASCII art that reproduces the original tone and the details of an input gray-scale image. Our new technique is inspired by the local exhaustive search to optimize binary images for printing based on the characteristic of the human visual system. Although it can generate high quality ASCII art images, a lot of computing time is necessary for the local exhaustive search. Hence, we have implemented our new technique in a GPU to accelerate the computation. The experimental results shows that the GPU implementation can achieve a speedup factor up to 57.1 over the conventional CPU implementation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An ASCII art is a matrix of characters that reproduces an original gray-scale image. It is commonly used to represent pseudo gray-scale images in text based messages. Since automatic generation of high quality ASCII art images is very hard, they are usually produced by hand. The main contribution of this paper is to propose a new technique to generate an ASCII art that reproduces the original tone and the details of an input gray-scale image. Our new technique is inspired by the local exhaustive search to optimize binary images for printing based on the characteristic of the human visual system. Although it can generate high quality ASCII art images, a lot of computing time is necessary for the local exhaustive search. Hence, we have implemented our new technique in a GPU to accelerate the computation. The experimental results shows that the GPU implementation can achieve a speedup factor up to 57.1 over the conventional CPU implementation.",
"fno": "06726897",
"keywords": [
"Art",
"Graphics Processing Units",
"Image Processing",
"Search Problems",
"Human Visual System",
"Binary Image Optimization",
"Gray Scale Image",
"GPU",
"Local Exhaustive Search",
"ASCII Art Generation",
"Art",
"Graphics Processing Units",
"Gray Scale",
"Instruction Sets",
"Acceleration",
"Computer Architecture",
"Visual Systems",
"ASCII Art",
"Local Exhaustive Search",
"Human Visual System",
"GPU",
"Parallel Computing"
],
"authors": [
{
"affiliation": null,
"fullName": "Yuji Takeuchi",
"givenName": "Yuji",
"surname": "Takeuchi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Daisuke Takafuji",
"givenName": "Daisuke",
"surname": "Takafuji",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yasuaki Ito",
"givenName": "Yasuaki",
"surname": "Ito",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Koji Nakano",
"givenName": "Koji",
"surname": "Nakano",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "candar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "194-200",
"year": "2013",
"issn": "2379-1888",
"isbn": "978-1-4799-2796-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06726896",
"articleId": "12OmNwFicTO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06726898",
"articleId": "12OmNzYwbYe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/malware/2008/3288/0/04690859",
"title": "Image spam — ASCII to the rescue!",
"doi": null,
"abstractUrl": "/proceedings-article/malware/2008/04690859/12OmNAio6Zo",
"parentPublication": {
"id": "proceedings/malware/2008/3288/0",
"title": "2008 3rd International Conference on Malicious and Unwanted Software (MALWARE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2013/5001/0/06655801",
"title": "Art Making Using an Haptic Device for Interactive Digital Painting",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2013/06655801/12OmNBUAvWs",
"parentPublication": {
"id": "proceedings/svr/2013/5001/0",
"title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispan-fcst-iscc/2017/0840/0/0840a391",
"title": "GPU-based Gray-Level Co-occurrence Matrix for Extracting Features from Magnetic Resonance Images",
"doi": null,
"abstractUrl": "/proceedings-article/ispan-fcst-iscc/2017/0840a391/12OmNBgQFH7",
"parentPublication": {
"id": "proceedings/ispan-fcst-iscc/2017/0840/0",
"title": "2017 14th International Symposium on Pervasive Systems, Algorithms and Networks & 2017 11th International Conference on Frontier of Computer Science and Technology & 2017 Third International Symposium of Creative Computing (ISPAN-FCST-ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispdc/2015/7147/0/07165134",
"title": "GPU-Accelerated Digital Halftoning by the Local Exhaustive Search",
"doi": null,
"abstractUrl": "/proceedings-article/ispdc/2015/07165134/12OmNxXCGLa",
"parentPublication": {
"id": "proceedings/ispdc/2015/7147/0",
"title": "2015 14th International Symposium on Parallel and Distributed Computing (ISPDC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770959",
"title": "Digital-Primitive Art Research: Animation Permeates Centuries-old Rawhides",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770959/12OmNyuPKYE",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2013/5159/0/06726401",
"title": "Traditional Chinese Patterns Analysis Based on Moment Invariants",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2013/06726401/12OmNzgwmPh",
"parentPublication": {
"id": "proceedings/msn/2013/5159/0",
"title": "2013 Ninth International Conference on Mobile Ad-hoc and Sensor Networks (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2005/03/mcg2005030040",
"title": "Depicting Dynamics Using Principles of Visual Art and Narrations",
"doi": null,
"abstractUrl": "/magazine/cg/2005/03/mcg2005030040/13rRUILLkxV",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2010/09/ttp2010091537",
"title": "Color to Gray: Visual Cue Preservation",
"doi": null,
"abstractUrl": "/journal/tp/2010/09/ttp2010091537/13rRUwbaqMF",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07491376",
"title": "ASCII Art Synthesis from Natural Photographs",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07491376/13rRUxBa5nr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859987",
"title": "DunhuangGAN: A Generative Adversarial Network for Dunhuang Mural Art Style Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859987/1G9E74GJUzK",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwdbV00",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyvGyjD",
"doi": "10.1109/CVPR.2012.6247725",
"title": "Learning image-specific parameters for interactive segmentation",
"normalizedTitle": "Learning image-specific parameters for interactive segmentation",
"abstract": "In this paper, we present a novel interactive image segmentation technique that automatically learns segmentation parameters tailored for each and every image. Unlike existing work, our method does not require any offline parameter tuning or training stage, and is capable of determining image-specific parameters according to some simple user interactions with the target image. We formulate the segmentation problem as an inference of a conditional random field (CRF) over a segmentation mask and the target image, and parametrize this CRF by different weights (e.g., color, texture and smoothing). The weight parameters are learned via an energy margin maximization, which is solved using a constraint approximation scheme and the cutting plane method. Experimental results show that our method, by learning image-specific parameters automatically, outperforms other state-of-the-art interactive image segmentation techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present a novel interactive image segmentation technique that automatically learns segmentation parameters tailored for each and every image. Unlike existing work, our method does not require any offline parameter tuning or training stage, and is capable of determining image-specific parameters according to some simple user interactions with the target image. We formulate the segmentation problem as an inference of a conditional random field (CRF) over a segmentation mask and the target image, and parametrize this CRF by different weights (e.g., color, texture and smoothing). The weight parameters are learned via an energy margin maximization, which is solved using a constraint approximation scheme and the cutting plane method. Experimental results show that our method, by learning image-specific parameters automatically, outperforms other state-of-the-art interactive image segmentation techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present a novel interactive image segmentation technique that automatically learns segmentation parameters tailored for each and every image. Unlike existing work, our method does not require any offline parameter tuning or training stage, and is capable of determining image-specific parameters according to some simple user interactions with the target image. We formulate the segmentation problem as an inference of a conditional random field (CRF) over a segmentation mask and the target image, and parametrize this CRF by different weights (e.g., color, texture and smoothing). The weight parameters are learned via an energy margin maximization, which is solved using a constraint approximation scheme and the cutting plane method. Experimental results show that our method, by learning image-specific parameters automatically, outperforms other state-of-the-art interactive image segmentation techniques.",
"fno": "075P1B22",
"keywords": [
"Optimisation",
"Approximation Theory",
"Image Colour Analysis",
"Image Segmentation",
"Image Texture",
"Learning Artificial Intelligence",
"Cutting Plane Method",
"Learning",
"Image Specific Parameters",
"Interactive Segmentation",
"Interactive Image Segmentation",
"Offline Parameter Tuning",
"Simple User Interactions",
"Conditional Random Field",
"Segmentation Mask",
"Target Image",
"Color",
"Texture",
"Smoothing",
"Weight Parameters",
"Energy Margin Maximization",
"Constraint Approximation",
"Image Segmentation",
"Image Color Analysis",
"Smoothing Methods",
"Approximation Methods",
"Training",
"Learning Systems",
"Indexes"
],
"authors": [
{
"affiliation": "Univ. of Hong Kong, Hong Kong, China",
"fullName": "K-Y K. Wong",
"givenName": "K-Y K.",
"surname": "Wong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Hong Kong, Hong Kong, China",
"fullName": "Hao Zhou",
"givenName": null,
"surname": "Hao Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Hong Kong, Hong Kong, China",
"fullName": "D. Schnieders",
"givenName": "D.",
"surname": "Schnieders",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Hong Kong, Hong Kong, China",
"fullName": "Zhanghui Kuang",
"givenName": null,
"surname": "Zhanghui Kuang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Hong Kong, Hong Kong, China",
"fullName": "Yizhou Yu",
"givenName": null,
"surname": "Yizhou Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hong Kong Polytech. Univ., Hong Kong, China",
"fullName": "Bo Peng",
"givenName": null,
"surname": "Bo Peng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "590-597",
"year": "2012",
"issn": "1063-6919",
"isbn": "978-1-4673-1226-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "074P1B21",
"articleId": "12OmNvFHfDe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "076P1B23",
"articleId": "12OmNBOllje",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icnc/2008/3304/5/3304e433",
"title": "An Segmentation Algorithm of Texture Image Based on DWT",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2008/3304e433/12OmNBRbksN",
"parentPublication": {
"id": "proceedings/icnc/2008/3304/5",
"title": "2008 Fourth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ihmsc/2010/4151/2/4151b232",
"title": "Research on Evaluation of Image Segmentation Based on Measurement Method of Particle's Parameters",
"doi": null,
"abstractUrl": "/proceedings-article/ihmsc/2010/4151b232/12OmNCesrbw",
"parentPublication": {
"id": "proceedings/ihmsc/2010/4151/2",
"title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2001/1372/0/13720569",
"title": "An Unsupervised Segmentation Framework For Texture Image Queries",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2001/13720569/12OmNqBKUex",
"parentPublication": {
"id": "proceedings/compsac/2001/1372/0",
"title": "25th Annual International Computer Software and Applications Conference. COMPSAC 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2014/4284/0/4284a128",
"title": "MRF and CRF Based Image Denoising and Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a128/12OmNwMFMl8",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2013/2809/0/2809a111",
"title": "An Infrared Pedestrians Image Segmentation Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2013/2809a111/12OmNxb5hui",
"parentPublication": {
"id": "proceedings/icinis/2013/2809/0",
"title": "2013 6th International Conference on Intelligent Networks and Intelligent Systems (ICINIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/1/81831220",
"title": "Fractional discrimination for texture image segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831220/12OmNzBwGFq",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851e545",
"title": "Semantic Image Segmentation with Task-Specific Edge Detection Using CNNs and a Discriminatively Trained Domain Transform",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851e545/12OmNzZEADV",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/3/81833054",
"title": "Fully Bayesian image segmentation-an engineering perspective",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81833054/12OmNzyGH6h",
"parentPublication": {
"id": "proceedings/icip/1997/8183/3",
"title": "Proceedings of International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2010/08/ttp2010081406",
"title": "Image Segmentation with a Unified Graphical Model",
"doi": null,
"abstractUrl": "/journal/tp/2010/08/ttp2010081406/13rRUwjoNyi",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2012/02/ttp2012020315",
"title": "Image Segmentation by Probabilistic Bottom-Up Aggregation and Cue Integration",
"doi": null,
"abstractUrl": "/journal/tp/2012/02/ttp2012020315/13rRUxASuBA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1h81oza1jwY",
"title": "2019 International Conference on Document Analysis and Recognition (ICDAR)",
"acronym": "icdar",
"groupId": "1000219",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1h81qkiMPkY",
"doi": "10.1109/ICDAR.2019.00216",
"title": "Linking Art through Human Poses",
"normalizedTitle": "Linking Art through Human Poses",
"abstract": "We address the discovery of composition transfer in artworks based on their visual content. Automated analysis of large art collections, which are growing as a result of art digitization among museums and galleries, is an important tool for art history and assists cultural heritage preservation. Modern image retrieval systems offer good performance on visually similar artworks, but fail in the cases of more abstract composition transfer. The proposed approach links artworks through a pose similarity of human figures depicted in images. Human figures are the subject of a large fraction of visual art from middle ages to modernity and their distinctive poses were often a source of inspiration among artists. The method consists of two steps - fast pose matching and robust spatial verification. We experimentally show that explicit human pose matching is superior to standard content-based image retrieval methods on a manually annotated art composition transfer dataset.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We address the discovery of composition transfer in artworks based on their visual content. Automated analysis of large art collections, which are growing as a result of art digitization among museums and galleries, is an important tool for art history and assists cultural heritage preservation. Modern image retrieval systems offer good performance on visually similar artworks, but fail in the cases of more abstract composition transfer. The proposed approach links artworks through a pose similarity of human figures depicted in images. Human figures are the subject of a large fraction of visual art from middle ages to modernity and their distinctive poses were often a source of inspiration among artists. The method consists of two steps - fast pose matching and robust spatial verification. We experimentally show that explicit human pose matching is superior to standard content-based image retrieval methods on a manually annotated art composition transfer dataset.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We address the discovery of composition transfer in artworks based on their visual content. Automated analysis of large art collections, which are growing as a result of art digitization among museums and galleries, is an important tool for art history and assists cultural heritage preservation. Modern image retrieval systems offer good performance on visually similar artworks, but fail in the cases of more abstract composition transfer. The proposed approach links artworks through a pose similarity of human figures depicted in images. Human figures are the subject of a large fraction of visual art from middle ages to modernity and their distinctive poses were often a source of inspiration among artists. The method consists of two steps - fast pose matching and robust spatial verification. We experimentally show that explicit human pose matching is superior to standard content-based image retrieval methods on a manually annotated art composition transfer dataset.",
"fno": "301400b338",
"keywords": [
"Art",
"Content Based Retrieval",
"History",
"Image Retrieval",
"Pose Estimation",
"Visual Content",
"Art Collections",
"Art Digitization",
"Art History",
"Cultural Heritage Preservation",
"Abstract Composition Transfer",
"Pose Similarity",
"Human Figures",
"Visual Art",
"Content Based Image Retrieval Methods",
"Manually Annotated Art Composition Transfer Dataset",
"Explicit Human Pose Matching",
"Visualization",
"Painting",
"Art",
"Image Retrieval",
"Detectors",
"Task Analysis",
"History",
"Pose Matching",
"Art Retrieval",
"Inspiration Discovery"
],
"authors": [
{
"affiliation": "Visual Recognition Group, FEE, Czech Technical University in Prague",
"fullName": "Tomas Jenicek",
"givenName": "Tomas",
"surname": "Jenicek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Visual Recognition Group, FEE, Czech Technical University in Prague",
"fullName": "Ondřej Chum",
"givenName": "Ondřej",
"surname": "Chum",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1338-1345",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3014-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "301400b332",
"articleId": "1h81tQ6vYic",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "301400b346",
"articleId": "1h81uTdbx3G",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2012/4702/0/4702a395",
"title": "Learning about Art History by Exploratory Search, Contextual View and Social Tags",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2012/4702a395/12OmNAHmOuQ",
"parentPublication": {
"id": "proceedings/icalt/2012/4702/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815036",
"title": "Feature Extraction and Analysis for Scientific Understanding of Visual Art",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815036/12OmNC8dgbu",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/culture-computing/2013/5047/0/5047a188",
"title": "Interactive Human: Seen through Digital Art",
"doi": null,
"abstractUrl": "/proceedings-article/culture-computing/2013/5047a188/12OmNwE9OEg",
"parentPublication": {
"id": "proceedings/culture-computing/2013/5047/0",
"title": "2013 International Conference on Culture and Computing (Culture Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2014/6854/0/6854a314",
"title": "Feature Extraction and Analysis for Scientific Understanding of Visual Art",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2014/6854a314/12OmNxxNbQ4",
"parentPublication": {
"id": "proceedings/icvrv/2014/6854/0",
"title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a258",
"title": "Omni-Learning XR Technologies and Visitor-Centered Experience in the Smart Art Museum",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a258/17D45WODasM",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f402",
"title": "Explain Me the Painting: Multi-Topic Knowledgeable Art Description Generation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f402/1BmFcr6niow",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2022/7218/0/09859412",
"title": "Quantification of Artist Representativity within an Art Movement",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2022/09859412/1G4F25H2uDm",
"parentPublication": {
"id": "proceedings/icmew/2022/7218/0",
"title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a667",
"title": "Art Rich: Place Your AR Artwork",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a667/1J7WguJqzWE",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2019/4896/0/489600a005",
"title": "How Will Sense of Values Change during Art Appreciation?",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2019/489600a005/1gAx0SPxuvu",
"parentPublication": {
"id": "proceedings/icdmw/2019/4896/0",
"title": "2019 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2020/8771/0/09122364",
"title": "An Examination of a Support Tool for Designing Shadow Box Art",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2020/09122364/1kRSeHyD3Rm",
"parentPublication": {
"id": "proceedings/nicoint/2020/8771/0",
"title": "2020 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1vb9aFdLGtq",
"title": "2021 6th International Conference on Smart Grid and Electrical Automation (ICSGEA)",
"acronym": "icsgea",
"groupId": "1814444",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1vb9iSLa9VK",
"doi": "10.1109/ICSGEA53208.2021.00029",
"title": "Design of online interactive education system of art course based on B/S architecture",
"normalizedTitle": "Design of online interactive education system of art course based on B/S architecture",
"abstract": "In order to improve the accuracy of resource mining and operation efficiency of online interactive education system for art courses, an online interactive education system for art courses based on B/S architecture is proposed and designed. In the B/S architecture, the overall structure of the system is designed. The hardware part of the system includes system login module, art course resource interactive communication module and interactive teaching module, which can fully improve the interactive education performance of the system. In the software part of the system, the feature clustering method is used to excavate the teaching resources of art course, so as to improve the utilization ability of art course resources and complete the overall design of the system. The experimental results show that, compared with the traditional education system, the designed education system has higher mining accuracy of art curriculum resources, and the designed system has lower response time. Therefore, it shows that the designed education system has better performance in practical application.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In order to improve the accuracy of resource mining and operation efficiency of online interactive education system for art courses, an online interactive education system for art courses based on B/S architecture is proposed and designed. In the B/S architecture, the overall structure of the system is designed. The hardware part of the system includes system login module, art course resource interactive communication module and interactive teaching module, which can fully improve the interactive education performance of the system. In the software part of the system, the feature clustering method is used to excavate the teaching resources of art course, so as to improve the utilization ability of art course resources and complete the overall design of the system. The experimental results show that, compared with the traditional education system, the designed education system has higher mining accuracy of art curriculum resources, and the designed system has lower response time. Therefore, it shows that the designed education system has better performance in practical application.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In order to improve the accuracy of resource mining and operation efficiency of online interactive education system for art courses, an online interactive education system for art courses based on B/S architecture is proposed and designed. In the B/S architecture, the overall structure of the system is designed. The hardware part of the system includes system login module, art course resource interactive communication module and interactive teaching module, which can fully improve the interactive education performance of the system. In the software part of the system, the feature clustering method is used to excavate the teaching resources of art course, so as to improve the utilization ability of art course resources and complete the overall design of the system. The experimental results show that, compared with the traditional education system, the designed education system has higher mining accuracy of art curriculum resources, and the designed system has lower response time. Therefore, it shows that the designed education system has better performance in practical application.",
"fno": "326300a106",
"keywords": [
"Computer Aided Instruction",
"Data Mining",
"Educational Courses",
"Interactive Systems",
"Pattern Clustering",
"Teaching",
"Online Interactive Education System",
"Resource Mining",
"Operation Efficiency",
"System Login Module",
"Interactive Communication Module",
"Interactive Teaching Module",
"Art Course Resources",
"Art Curriculum Resources",
"B S Architecture",
"Feature Clustering Method",
"Deep Learning",
"Art",
"Automation",
"Clustering Methods",
"Education",
"Computer Architecture",
"Software",
"B S Architecture",
"Art Curriculum",
"Online Interaction",
"Education System"
],
"authors": [
{
"affiliation": "Suzhou art & design technology institute,Su'zhou,China,215104",
"fullName": "Xiaozhen Song",
"givenName": "Xiaozhen",
"surname": "Song",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icsgea",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-05-01T00:00:00",
"pubType": "proceedings",
"pages": "106-111",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3263-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "326300a101",
"articleId": "1vb9gDzKQAo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "326300a112",
"articleId": "1vb9eS3ZPfa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ispaw/2011/4429/0/4429a066",
"title": "Analysis about Application of Learning System into Interactive Digital Art Implementation",
"doi": null,
"abstractUrl": "/proceedings-article/ispaw/2011/4429a066/12OmNAHmOvO",
"parentPublication": {
"id": "proceedings/ispaw/2011/4429/0",
"title": "Parallel and Distributed Processing with Applications Workshops, IEEE International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2010/3987/2/3987b773",
"title": "Application of Computer Multimedia Technology in Art Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2010/3987b773/12OmNAQrYC6",
"parentPublication": {
"id": "proceedings/etcs/2010/3987/2",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2016/2312/0/2312a838",
"title": "The Status and Model of Art and Design Online Course in the United States: Take the Art and Design Online Course in University of California, Berkeley as an Example",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a838/12OmNxFJXsk",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2016/2312/0/2312a589",
"title": "School and Enterprise Remote Interactive Visualization Approach in Environmental Art Design Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a589/12OmNzWfp96",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2022/9978/0/997800b158",
"title": "Design and Implementation of Mobile Interactive Learning Platform for Art Teaching of Universities",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2022/997800b158/1Byes9mZXjy",
"parentPublication": {
"id": "proceedings/icmtma/2022/9978/0",
"title": "2022 14th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise-ie/2021/3829/0/382900b424",
"title": "The Application of Digital Media Art in Art Museum Public Education Based on Human-Computer Interaction Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icise-ie/2021/382900b424/1C8GniYd4cM",
"parentPublication": {
"id": "proceedings/icise-ie/2021/3829/0",
"title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cipae/2020/8223/0/822300a259",
"title": "Research on interactive mode of children’s museum based on guided teaching",
"doi": null,
"abstractUrl": "/proceedings-article/cipae/2020/822300a259/1rSRhLDALkY",
"parentPublication": {
"id": "proceedings/cipae/2020/8223/0",
"title": "2020 International Conference on Computers, Information Processing and Advanced Education (CIPAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeim/2020/9623/0/962300a179",
"title": "Research on the Design of Interactive Teaching System of College Art Course Based on Computer Network",
"doi": null,
"abstractUrl": "/proceedings-article/icmeim/2020/962300a179/1syvpxlYx1K",
"parentPublication": {
"id": "proceedings/icmeim/2020/9623/0",
"title": "2020 International Conference on Modern Education and Information Management (ICMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2021/3892/0/389200a703",
"title": "Multi interactive and mixed teaching system of law distance education based on P2P technology",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2021/389200a703/1t2ncTPUJ9e",
"parentPublication": {
"id": "proceedings/icmtma/2021/3892/0",
"title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icid/2020/1481/0/440500a090",
"title": "New Intelligent Dialogue Consciousness Under the Concept of Ecological Art Education",
"doi": null,
"abstractUrl": "/proceedings-article/icid/2020/440500a090/1taFqBOgtiM",
"parentPublication": {
"id": "proceedings/icid/2020/1481/0",
"title": "2020 International Conference on Intelligent Design (ICID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1vg7AGzvxNC",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1vg7DnnY38k",
"doi": "10.1109/ICVRV51359.2020.00074",
"title": "Design and implementation of immersive ink art",
"normalizedTitle": "Design and implementation of immersive ink art",
"abstract": "Nowadays, with the continuous development and progress of computer graphics, computer technology provides a new medium form for traditional ink art. This new form is mainly to use 3D animation modeling and Unity3D software platform to process the ink-style rendering of the 3D model. First, by extracting feature lines and local pixel regions as the basic outline of the object, the surface texture material of the object is pasted with ink-style texture material to form a simple ink effect. And then the camera is adjusted or Gaussian blur processing, depth and the processing of normal texture makes the ink and wash effect more realistic, achieving the fusion of modern digital technology and traditional ink and landscape painting.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Nowadays, with the continuous development and progress of computer graphics, computer technology provides a new medium form for traditional ink art. This new form is mainly to use 3D animation modeling and Unity3D software platform to process the ink-style rendering of the 3D model. First, by extracting feature lines and local pixel regions as the basic outline of the object, the surface texture material of the object is pasted with ink-style texture material to form a simple ink effect. And then the camera is adjusted or Gaussian blur processing, depth and the processing of normal texture makes the ink and wash effect more realistic, achieving the fusion of modern digital technology and traditional ink and landscape painting.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Nowadays, with the continuous development and progress of computer graphics, computer technology provides a new medium form for traditional ink art. This new form is mainly to use 3D animation modeling and Unity3D software platform to process the ink-style rendering of the 3D model. First, by extracting feature lines and local pixel regions as the basic outline of the object, the surface texture material of the object is pasted with ink-style texture material to form a simple ink effect. And then the camera is adjusted or Gaussian blur processing, depth and the processing of normal texture makes the ink and wash effect more realistic, achieving the fusion of modern digital technology and traditional ink and landscape painting.",
"fno": "049700a296",
"keywords": [
"Art",
"Computer Animation",
"Computer Graphics",
"Feature Extraction",
"Image Texture",
"Ink",
"Rendering Computer Graphics",
"Solid Modelling",
"Basic Outline",
"Surface Texture Material",
"Ink Style Texture Material",
"Simple Ink Effect",
"Gaussian Blur Processing",
"Normal Texture",
"Wash Effect",
"Modern Digital Technology",
"Landscape Painting",
"Immersive Ink Art",
"Continuous Development",
"Computer Graphics",
"Computer Technology",
"Medium Form",
"Traditional Ink Art",
"3 D Animation Modeling",
"Unity 3 D Software Platform",
"Ink Style Rendering",
"Feature Lines",
"Local Pixel Regions",
"Solid Modeling",
"Visualization",
"Three Dimensional Displays",
"Art",
"Computational Modeling",
"Ink",
"Virtual Reality",
"Shader",
"Ink Style",
"Image Processing",
"Game Engine"
],
"authors": [
{
"affiliation": "Northern China University of Technology The School of Information Science and Technology,Beijing,China",
"fullName": "Jingxing Wang",
"givenName": "Jingxing",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northern China University of Technology The School of Information Science and Technology,Beijing,China",
"fullName": "Feng Quan Zhang",
"givenName": "Feng Quan",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northern China University of Technology The School of Information Science and Technology,Beijing,China",
"fullName": "Weijia Xu",
"givenName": "Weijia",
"surname": "Xu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "296-297",
"year": "2020",
"issn": "2375-141X",
"isbn": "978-1-6654-0497-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1vg7Dcrh984",
"name": "picvrv202004970-09479839s1-mm_049700a296.zip",
"size": "23 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/picvrv202004970-09479839s1-mm_049700a296.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "049700a294",
"articleId": "1vg7Pg0aZ4A",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "049700a298",
"articleId": "1vg88AQa3ao",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/das/2008/3337/0/3337a622",
"title": "Digital Ink to Form Alignment for Electronic Clipboard Devices",
"doi": null,
"abstractUrl": "/proceedings-article/das/2008/3337a622/12OmNrkT7v3",
"parentPublication": {
"id": "proceedings/das/2008/3337/0",
"title": "2008 The Eighth IAPR International Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2015/9393/0/9393a817",
"title": "The Application of Ink Element in the Creation of Digital Image",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2015/9393a817/12OmNxHryik",
"parentPublication": {
"id": "proceedings/isdea/2015/9393/0",
"title": "2015 Sixth International Conference on Intelligent Systems Design and Engineering Applications (ISDEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2005/2392/0/23920317",
"title": "Physical Modeling of \"Xuan\" Paper in the Simulation of Chinese Ink-Wash Drawing",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2005/23920317/12OmNzTppDM",
"parentPublication": {
"id": "proceedings/cgiv/2005/2392/0",
"title": "International Conference on Computer Graphics, Imaging and Visualization (CGIV'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2009/3789/0/3789a089",
"title": "Artwork-Based 3D Ink Style Modeling and Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a089/12OmNzlUKmq",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/02/v0235",
"title": "Image-Based Color Ink Diffusion Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2007/02/v0235/13rRUxASuSC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icime/2018/7616/0/761600a205",
"title": "A Method for Ink-Wash Painting Rendering for 3D Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/icime/2018/761600a205/17D45WGGoLy",
"parentPublication": {
"id": "proceedings/icime/2018/7616/0",
"title": "2018 International Joint Conference on Information, Media and Engineering (ICIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/i-span/2018/8534/0/853400a193",
"title": "Two-Stage Color ink Painting Style Transfer via Convolution Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/i-span/2018/853400a193/17D45XvMcbn",
"parentPublication": {
"id": "proceedings/i-span/2018/8534/0",
"title": "2018 15th International Symposium on Pervasive Systems, Algorithms and Networks (I-SPAN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icebe/2022/9244/0/924400a001",
"title": "Aesthetic Research of Dynamic Ink Contour Stylized Material Based on Movable Camera in 3D Game and Their Real-time Rendering Method Taking the OKAMI HD Game for Example",
"doi": null,
"abstractUrl": "/proceedings-article/icebe/2022/924400a001/1Kzzgn4fqpy",
"parentPublication": {
"id": "proceedings/icebe/2022/9244/0",
"title": "2022 IEEE International Conference on e-Business Engineering (ICEBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccgiv/2022/9250/0/925000a006",
"title": "An Example­Based Method for 3D Real­time Rendering In Chinese Ink Style",
"doi": null,
"abstractUrl": "/proceedings-article/iccgiv/2022/925000a006/1LxfoK6kZzi",
"parentPublication": {
"id": "proceedings/iccgiv/2022/9250/0",
"title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a284",
"title": "Real-time Rendering of 3D Animal Models in Chinese Ink Painting Style",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a284/1p1grC3XnGw",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAYoKmw",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxYbSX3",
"doi": "10.1109/ISMAR.2013.6671808",
"title": "Augmenting markerless complex 3D objects by combining geometrical and color edge information",
"normalizedTitle": "Augmenting markerless complex 3D objects by combining geometrical and color edge information",
"abstract": "This paper presents a method to address the issue of augmenting a markerless 3D object with a complex shape. It relies on a model-based tracker which takes advantage of GPU acceleration and 3D rendering in order to handle the complete 3D model, whose sharp edges are efficiently extracted. In the pose estimation step, we propose to robustly combine geometrical and color edge-based features in the nonlinear minimization process, and to integrate multiple-hypotheses in the geometrical edge-based registration phase. Our tracking method shows promising results for augmented reality applications, with a Kinect-based reconstructed 3D model.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a method to address the issue of augmenting a markerless 3D object with a complex shape. It relies on a model-based tracker which takes advantage of GPU acceleration and 3D rendering in order to handle the complete 3D model, whose sharp edges are efficiently extracted. In the pose estimation step, we propose to robustly combine geometrical and color edge-based features in the nonlinear minimization process, and to integrate multiple-hypotheses in the geometrical edge-based registration phase. Our tracking method shows promising results for augmented reality applications, with a Kinect-based reconstructed 3D model.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a method to address the issue of augmenting a markerless 3D object with a complex shape. It relies on a model-based tracker which takes advantage of GPU acceleration and 3D rendering in order to handle the complete 3D model, whose sharp edges are efficiently extracted. In the pose estimation step, we propose to robustly combine geometrical and color edge-based features in the nonlinear minimization process, and to integrate multiple-hypotheses in the geometrical edge-based registration phase. Our tracking method shows promising results for augmented reality applications, with a Kinect-based reconstructed 3D model.",
"fno": "06671808",
"keywords": [
"Three Dimensional Displays",
"Solid Modeling",
"Image Edge Detection",
"Image Color Analysis",
"Robustness",
"Augmented Reality",
"Computational Modeling",
"Model Based Tracking",
"3 D Visual Tracking"
],
"authors": [
{
"affiliation": "INRIA Rennes, Rennes, France",
"fullName": "Antoine Petit",
"givenName": "Antoine",
"surname": "Petit",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IRISA, Rennes, France",
"fullName": "Eric Marchand",
"givenName": "Eric",
"surname": "Marchand",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Astrium, USA",
"fullName": "Keyvan Kanani",
"givenName": "Keyvan",
"surname": "Kanani",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-10-01T00:00:00",
"pubType": "proceedings",
"pages": "287-288",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2869-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06671807",
"articleId": "12OmNqFJhRx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06671809",
"articleId": "12OmNyXMQan",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ihmsc/2011/4444/2/4444b047",
"title": "Piano AR: A Markerless Augmented Reality Based Piano Teaching System",
"doi": null,
"abstractUrl": "/proceedings-article/ihmsc/2011/4444b047/12OmNASILSp",
"parentPublication": {
"id": "proceedings/ihmsc/2011/4444/2",
"title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2007/1749/0/04538829",
"title": "Automatic contour model creation out of polygonal CAD models for markerless Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2007/04538829/12OmNBp52FX",
"parentPublication": {
"id": "proceedings/ismar/2007/1749/0",
"title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550212",
"title": "Poster: Markerless fingertip-based 3D interaction for handheld augmented reality in a small workspace",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550212/12OmNBsue2b",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/Ismar-mashd/2015/9628/0/9628a059",
"title": "Augmented Reality Tool for Markerless Virtual Try-on around Human Arm",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2015/9628a059/12OmNBtCCIY",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2015/9628/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549440",
"title": "Providing guidance for maintenance operations using automatic markerless augmented reality system",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549440/12OmNxcMSep",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2007/1749/0/04538839",
"title": "Real-Time Object Tracking for Augmented Reality Combining Graph Cuts and Optical Flow",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2007/04538839/12OmNyU63tI",
"parentPublication": {
"id": "proceedings/ismar/2007/1749/0",
"title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a724",
"title": "Towards Scalable and Real-time Markerless Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a724/1CJddGGn1n2",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cphs/2022/8203/0/820300a024",
"title": "Here To Stay: A Quantitative Comparison of Virtual Object Stability in Markerless Mobile AR",
"doi": null,
"abstractUrl": "/proceedings-article/cphs/2022/820300a024/1Eyj6ObtdWU",
"parentPublication": {
"id": "proceedings/cphs/2022/8203/0",
"title": "2022 2nd International Workshop on Cyber-Physical-Human System Design and Implementation (CPHS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a433",
"title": "Mixed Reality Application: A Framework of Markerless Assembly Guidance System with Hololens Glass",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a433/1ap5zkHZ9OU",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2019/07/08747287",
"title": "Augmenting Cognition Through Edge Computing",
"doi": null,
"abstractUrl": "/magazine/co/2019/07/08747287/1bcFnft1ryM",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyugz58",
"title": "2016 13th International Conference on Computer Graphics, Imaging and Visualization (CGiV)",
"acronym": "cgiv",
"groupId": "1001775",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxw5B86",
"doi": "10.1109/CGiV.2016.14",
"title": "Finite Element Simulation of 2.5/3D Shaped and Rigid Electronic Circuits",
"normalizedTitle": "Finite Element Simulation of 2.5/3D Shaped and Rigid Electronic Circuits",
"abstract": "Today a need is emerging for embedding electronic and sensor functions in the products which needs these functions, and, importantly, to do this without noticeably influencing the mechanical design of the product. This contribution describes an approach used to produce a 2.5/3D free-form rigid and smart objects or randomly shaped circuit. The proposed fabrication process of shaped circuit is compatible with a typical printed circuit manufacturing and electronics assembly. Once the circuit is completed in its flat shape, its random final functional shape is given using thermoforming. In order to be able to deform a given flat circuit to its final form with predictable final spatial positions of components and interconnections. A FEM simulation is conducted to model the thermoforming of polymer based electronic circuits. As one of the process outputs, the wall thickness distribution predicted for the final part is compared with the experimental results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Today a need is emerging for embedding electronic and sensor functions in the products which needs these functions, and, importantly, to do this without noticeably influencing the mechanical design of the product. This contribution describes an approach used to produce a 2.5/3D free-form rigid and smart objects or randomly shaped circuit. The proposed fabrication process of shaped circuit is compatible with a typical printed circuit manufacturing and electronics assembly. Once the circuit is completed in its flat shape, its random final functional shape is given using thermoforming. In order to be able to deform a given flat circuit to its final form with predictable final spatial positions of components and interconnections. A FEM simulation is conducted to model the thermoforming of polymer based electronic circuits. As one of the process outputs, the wall thickness distribution predicted for the final part is compared with the experimental results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Today a need is emerging for embedding electronic and sensor functions in the products which needs these functions, and, importantly, to do this without noticeably influencing the mechanical design of the product. This contribution describes an approach used to produce a 2.5/3D free-form rigid and smart objects or randomly shaped circuit. The proposed fabrication process of shaped circuit is compatible with a typical printed circuit manufacturing and electronics assembly. Once the circuit is completed in its flat shape, its random final functional shape is given using thermoforming. In order to be able to deform a given flat circuit to its final form with predictable final spatial positions of components and interconnections. A FEM simulation is conducted to model the thermoforming of polymer based electronic circuits. As one of the process outputs, the wall thickness distribution predicted for the final part is compared with the experimental results.",
"fno": "0811a024",
"keywords": [
"Copper",
"Thermoforming",
"Polymers",
"Three Dimensional Displays",
"Finite Element Analysis",
"Integrated Circuit Modeling",
"Substrates",
"Hyperelastic",
"2 5 3 D Circuit",
"Thermoforming",
"Numerical Simulation"
],
"authors": [
{
"affiliation": null,
"fullName": "Imen Chtioui",
"givenName": "Imen",
"surname": "Chtioui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Frederick Bossyut",
"givenName": "Frederick",
"surname": "Bossyut",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mohamed Hedi Bedoui",
"givenName": "Mohamed Hedi",
"surname": "Bedoui",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cgiv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "24-28",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-0811-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0811a018",
"articleId": "12OmNwx3Q8F",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0811a029",
"articleId": "12OmNvHY2Et",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccms/2010/3941/1/3941a175",
"title": "Finite Element Analysis and Structural Improvement of Diesel Engine Connecting Rod",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/3941a175/12OmNAgY7nS",
"parentPublication": {
"id": "proceedings/iccms/2010/3941/3",
"title": "Computer Modeling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2010/3987/1/3987a206",
"title": "A New Non-rigid Image Registration Algorithm Using the Finite-Element Method",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2010/3987a206/12OmNCesr3f",
"parentPublication": {
"id": "proceedings/etcs/2010/3987/1",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2014/7434/0/7434a330",
"title": "On the Field Patterns in Elliptic-Shaped Microshield Lines Loaded with Left-Handed Materials by the Edge-Based Finite Element Method",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2014/7434a330/12OmNrHB1TR",
"parentPublication": {
"id": "proceedings/cis/2014/7434/0",
"title": "2014 Tenth International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crc/2017/0677/0/0677a080",
"title": "Vibration Analysis of U-Shaped Beam Electrothermal Microactuators",
"doi": null,
"abstractUrl": "/proceedings-article/crc/2017/0677a080/12OmNyuy9MI",
"parentPublication": {
"id": "proceedings/crc/2017/0677/0",
"title": "2017 2nd International Conference on Cybernetics, Robotics and Control (CRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icic/2010/4047/2/4047b237",
"title": "The Shear Strength of T-shaped Broad-Limb Special Column Using the Finite Element Method",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2010/4047b237/12OmNz3bdJi",
"parentPublication": {
"id": "proceedings/icic/2010/4047/1",
"title": "2010 Third International Conference on Information and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dac/1979/9999/0/01600108",
"title": "A Procedure for Checking the Topological Consistency of A 2-D or 3-D Finite Element Mesh",
"doi": null,
"abstractUrl": "/proceedings-article/dac/1979/01600108/12OmNzYwbY9",
"parentPublication": {
"id": "proceedings/dac/1979/9999/0",
"title": "16th Design Automation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999790",
"title": "Forming a Vertical Interconnect Structure Using Dry Film Processing for Fan Out Wafer Level Packaging",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999790/12OmNzaQoxy",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2018/5500/0/550000b020",
"title": "A Finite Element Analysis of Internal Fixations for U-Shaped Sacral Fracture",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2018/550000b020/17D45VTRop3",
"parentPublication": {
"id": "proceedings/icisce/2018/5500/0",
"title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2019/5045/0/504500a169",
"title": "Design Improvement and Finite Element Analysis of Ceramic Gas Kiln Steel Structure Frame",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2019/504500a169/1hHLo0X7weA",
"parentPublication": {
"id": "proceedings/wcmeim/2019/5045/0",
"title": "2019 2nd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icectt/2020/9928/0/992800a129",
"title": "An Analysis on the Design of a Special-Shaped Plastic Snap-fit Based on the Technology of Finite Element",
"doi": null,
"abstractUrl": "/proceedings-article/icectt/2020/992800a129/1oa5kPgHAQg",
"parentPublication": {
"id": "proceedings/icectt/2020/9928/0",
"title": "2020 5th International Conference on Electromechanical Control Technology and Transportation (ICECTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1lSPqCX04",
"doi": "10.1109/CVPR52688.2022.00230",
"title": "3D Scene Painting via Semantic Image Synthesis",
"normalizedTitle": "3D Scene Painting via Semantic Image Synthesis",
"abstract": "We propose a novel approach to 3D scene painting using a configurable 3D scene layout. Our approach takes a 3D scene with semantic class labels as input and trains a 3D scene painting network that synthesizes color values for the input 3D scene. We exploit an off-the-shelf 2D seman-tic image synthesis method to teach the 3D painting net-work without explicit color supervision. Experiments show that our approach produces images with geometrically cor-rect structures and supports scene manipulation, such as the change of viewpoint, object poses, and painting style. Our approach provides rich controllability to synthesized images in the aspect of 3D geometry.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel approach to 3D scene painting using a configurable 3D scene layout. Our approach takes a 3D scene with semantic class labels as input and trains a 3D scene painting network that synthesizes color values for the input 3D scene. We exploit an off-the-shelf 2D seman-tic image synthesis method to teach the 3D painting net-work without explicit color supervision. Experiments show that our approach produces images with geometrically cor-rect structures and supports scene manipulation, such as the change of viewpoint, object poses, and painting style. Our approach provides rich controllability to synthesized images in the aspect of 3D geometry.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel approach to 3D scene painting using a configurable 3D scene layout. Our approach takes a 3D scene with semantic class labels as input and trains a 3D scene painting network that synthesizes color values for the input 3D scene. We exploit an off-the-shelf 2D seman-tic image synthesis method to teach the 3D painting net-work without explicit color supervision. Experiments show that our approach produces images with geometrically cor-rect structures and supports scene manipulation, such as the change of viewpoint, object poses, and painting style. Our approach provides rich controllability to synthesized images in the aspect of 3D geometry.",
"fno": "694600c252",
"keywords": [
"Image Classification",
"Image Colour Analysis",
"Learning Artificial Intelligence",
"Object Detection",
"Rendering Computer Graphics",
"Solid Modelling",
"Semantic Image Synthesis",
"Configurable 3 D Scene Layout",
"Semantic Class Labels",
"3 D Scene Painting Network",
"Input 3 D Scene",
"Seman Tic Image Synthesis Method",
"3 D Painting Net Work",
"Cor Rect Structures",
"Supports Scene Manipulation",
"Painting Style",
"Training",
"Solid Modeling",
"Three Dimensional Displays",
"Image Color Analysis",
"Image Synthesis",
"Machine Vision",
"Semantics"
],
"authors": [
{
"affiliation": "POSTECH GSAI & CSE",
"fullName": "Jaebong Jeong",
"givenName": "Jaebong",
"surname": "Jeong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "POSTECH GSAI & CSE",
"fullName": "Janghun Jo",
"givenName": "Janghun",
"surname": "Jo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "POSTECH GSAI & CSE",
"fullName": "Sunghyun Cho",
"givenName": "Sunghyun",
"surname": "Cho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "POSTECH GSAI & CSE",
"fullName": "Jaesik Park",
"givenName": "Jaesik",
"surname": "Park",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2252-2262",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H1lSLGr4Bi",
"name": "pcvpr202269460-09878815s1-mm_694600c252.zip",
"size": "1.08 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878815s1-mm_694600c252.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600c242",
"articleId": "1H0NSzhsiJ2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600c263",
"articleId": "1H1iGI30yWY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2019/09/08419282",
"title": "High Relief from Brush Painting",
"doi": null,
"abstractUrl": "/journal/tg/2019/09/08419282/13rRUxcKzVn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a687",
"title": "Data Augmented 3D Semantic Scene Completion with 2D Segmentation Priors",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a687/1B1401Oq8I8",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f610",
"title": "Scene Synthesis via Uncertainty-Driven Attribute Synchronization",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f610/1BmFoAFYs7K",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a974",
"title": "3DColAR: Exploring 3D Color Selection and Surface Painting for Head Worn Augmented Reality using Hand Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a974/1CJcAGzhwxq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600e329",
"title": "Control-NeRF: Editable Feature Volumes for Scene Rendering and Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600e329/1L8qzXVyRlS",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d960",
"title": "Learning 3D Semantic Scene Graphs From 3D Indoor Reconstructions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d960/1m3nHniwEg0",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/7.168E197",
"title": "3D Sketch-Aware Semantic Scene Completion via Semi-Supervised Structure Prior",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/7.168E197/1m3ngObnCda",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2020/4272/0/427200a400",
"title": "Multi-attribute Guided Painting Generation",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2020/427200a400/1mA9Z4FFJ7i",
"parentPublication": {
"id": "proceedings/mipr/2020/4272/0",
"title": "2020 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a253",
"title": "Multi-touch Simulation System for Sand Painting",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a253/1vg8dCS9bhu",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a324",
"title": "Semantic Scene Completion via Integrating Instances and Scene in-the-Loop",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a324/1yeLH3ZxKaQ",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1wzs0vrjyWQ",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yJYtFBEASI",
"doi": "10.1109/CVPRW53098.2021.00117",
"title": "Point2color: 3D Point Cloud Colorization Using a Conditional Generative Network and Differentiable Rendering for Airborne LiDAR",
"normalizedTitle": "Point2color: 3D Point Cloud Colorization Using a Conditional Generative Network and Differentiable Rendering for Airborne LiDAR",
"abstract": "Airborne LiDAR observations are very effective for providing accurate 3D point clouds, and archived data are becoming available to the public. In many cases, only geometric information is available in the published 3D point cloud observed by airborne LiDAR (airborne 3D point cloud), and geometric information alone is not readable. Thus, it is important to colorize airborne 3D point clouds to improve visual readability. A scheme for 3D point cloud colorization using a conditional generative adversarial network (cGAN) was proposed, but it is difficult to apply to airborne LiDAR because the method is for artificial CAD models. Since airborne 3D point clouds are spread over a wider area than simple CAD models, it is important to evaluate them spatially in two-dimensional (2D) images. Currently, the differentiable renderer is the most reliable method to bridge 3D and 2D images. In this paper, we propose an airborne 3D point cloud colorization scheme called point2color using cGAN with points and rendered images. To achieve airborne 3D point cloud colorization, we estimate the color of each point with PointNet++ and render the estimated colored airborne 3D point cloud into a 2D image with a differentiable renderer. The network is then trained by minimizing the distance between real color and colorized fake color. The experimental results demonstrate the effectiveness of point2color using the IEEE GRSS 2018 Data Fusion Contest dataset with lower error than previous studies. Furthermore, an ablation study demonstrates the effectiveness of using a cGAN pipeline and 2D images via a differentiable renderer. Our code will be available at GitHub.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Airborne LiDAR observations are very effective for providing accurate 3D point clouds, and archived data are becoming available to the public. In many cases, only geometric information is available in the published 3D point cloud observed by airborne LiDAR (airborne 3D point cloud), and geometric information alone is not readable. Thus, it is important to colorize airborne 3D point clouds to improve visual readability. A scheme for 3D point cloud colorization using a conditional generative adversarial network (cGAN) was proposed, but it is difficult to apply to airborne LiDAR because the method is for artificial CAD models. Since airborne 3D point clouds are spread over a wider area than simple CAD models, it is important to evaluate them spatially in two-dimensional (2D) images. Currently, the differentiable renderer is the most reliable method to bridge 3D and 2D images. In this paper, we propose an airborne 3D point cloud colorization scheme called point2color using cGAN with points and rendered images. To achieve airborne 3D point cloud colorization, we estimate the color of each point with PointNet++ and render the estimated colored airborne 3D point cloud into a 2D image with a differentiable renderer. The network is then trained by minimizing the distance between real color and colorized fake color. The experimental results demonstrate the effectiveness of point2color using the IEEE GRSS 2018 Data Fusion Contest dataset with lower error than previous studies. Furthermore, an ablation study demonstrates the effectiveness of using a cGAN pipeline and 2D images via a differentiable renderer. Our code will be available at GitHub.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Airborne LiDAR observations are very effective for providing accurate 3D point clouds, and archived data are becoming available to the public. In many cases, only geometric information is available in the published 3D point cloud observed by airborne LiDAR (airborne 3D point cloud), and geometric information alone is not readable. Thus, it is important to colorize airborne 3D point clouds to improve visual readability. A scheme for 3D point cloud colorization using a conditional generative adversarial network (cGAN) was proposed, but it is difficult to apply to airborne LiDAR because the method is for artificial CAD models. Since airborne 3D point clouds are spread over a wider area than simple CAD models, it is important to evaluate them spatially in two-dimensional (2D) images. Currently, the differentiable renderer is the most reliable method to bridge 3D and 2D images. In this paper, we propose an airborne 3D point cloud colorization scheme called point2color using cGAN with points and rendered images. To achieve airborne 3D point cloud colorization, we estimate the color of each point with PointNet++ and render the estimated colored airborne 3D point cloud into a 2D image with a differentiable renderer. The network is then trained by minimizing the distance between real color and colorized fake color. The experimental results demonstrate the effectiveness of point2color using the IEEE GRSS 2018 Data Fusion Contest dataset with lower error than previous studies. Furthermore, an ablation study demonstrates the effectiveness of using a cGAN pipeline and 2D images via a differentiable renderer. Our code will be available at GitHub.",
"fno": "489900b062",
"keywords": [
"Solid Modeling",
"Visualization",
"Three Dimensional Displays",
"Laser Radar",
"Image Color Analysis",
"Atmospheric Modeling",
"Two Dimensional Displays"
],
"authors": [
{
"affiliation": "Tokyo Institute of Technology,Yokohama,Japan",
"fullName": "Takayuki Shinohara",
"givenName": "Takayuki",
"surname": "Shinohara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology,Yokohama,Japan",
"fullName": "Haoyi Xiu",
"givenName": "Haoyi",
"surname": "Xiu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology,Yokohama,Japan",
"fullName": "Masashi Matsuoka",
"givenName": "Masashi",
"surname": "Matsuoka",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1062-1071",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4899-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "489900b052",
"articleId": "1yJYhCJ2ZjO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "489900b072",
"articleId": "1yJYeaiNOmc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccnea/2017/3981/0/3981a221",
"title": "Lidar Image Classification Based on Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2017/3981a221/12OmNAHmOtI",
"parentPublication": {
"id": "proceedings/iccnea/2017/3981/0",
"title": "2017 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2014/6239/0/6239a220",
"title": "Simple Octree Solution for Multi-resolution LiDAR Processing and Visualisation",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2014/6239a220/12OmNqFa5lS",
"parentPublication": {
"id": "proceedings/cit/2014/6239/0",
"title": "2014 IEEE International Conference on Computer and Information Technology (CIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/1/7000a689",
"title": "Efficient Colorization of Large-Scale Point Cloud Using Multi-pass Z-Ordering",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a689/12OmNynJMKf",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000b968",
"title": "Automatic Large-Scale 3D Building Shape Refinement Using Conditional Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000b968/17D45X2fUFK",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2022/9548/0/954800a366",
"title": "ADAPTIVE ACQUISITION OF AIRBORNE LIDAR POINT CLOUD BASED ON DEEP REINFORCEMENT LEARNING",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2022/954800a366/1GvdduhsaUo",
"parentPublication": {
"id": "proceedings/mipr/2022/9548/0",
"title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300a857",
"title": "Monocular 3D Object Detection with Pseudo-LiDAR Point Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300a857/1i5mFr4yElW",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800n3713",
"title": "Physically Realizable Adversarial Examples for LiDAR Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800n3713/1m3o75on8VG",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700c846",
"title": "2D to 3D Medical Image Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700c846/1uqGFquPKx2",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900j934",
"title": "Cylindrical and Asymmetrical 3D Convolution Networks for LiDAR Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900j934/1yeKH3VnVIY",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f523",
"title": "4D Panoptic LiDAR Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f523/1yeLogZvWrS",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzUPpw4",
"title": "IEEE Symposium on Information Visualization 2003",
"acronym": "infvis",
"groupId": "1000371",
"volume": "0",
"displayVolume": "0",
"year": "2003",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrYCXId",
"doi": "10.1109/INFVIS.2003.1249002",
"title": "Exploding the frame: designing for wall-size computer displays",
"normalizedTitle": "Exploding the frame: designing for wall-size computer displays",
"abstract": "High-resolution wall-size digital displays present significant new and different visual space to show and see imagery. The author has been working with two wall-size digital displays at Princeton University for five years and directing and producing IMAX films for a decade, and he has noted some unique design considerations for creating effective visual images when they are spread across entire walls. The author suggests these \"frameless\" screens - where images are so large we need to look around to see the entire field - need different ways of thinking about image design and visualization. Presenting such things as scale and detail take on new meaning when they can be displayed life-size and not shown in the context of one or many small frames such as we see everywhere. These design ideas will be of use for pervasive computing, interface research and design, interactive design, control design, representations of massive data sets, and creating effective displays of data for research and education.",
"abstracts": [
{
"abstractType": "Regular",
"content": "High-resolution wall-size digital displays present significant new and different visual space to show and see imagery. The author has been working with two wall-size digital displays at Princeton University for five years and directing and producing IMAX films for a decade, and he has noted some unique design considerations for creating effective visual images when they are spread across entire walls. The author suggests these \"frameless\" screens - where images are so large we need to look around to see the entire field - need different ways of thinking about image design and visualization. Presenting such things as scale and detail take on new meaning when they can be displayed life-size and not shown in the context of one or many small frames such as we see everywhere. These design ideas will be of use for pervasive computing, interface research and design, interactive design, control design, representations of massive data sets, and creating effective displays of data for research and education.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "High-resolution wall-size digital displays present significant new and different visual space to show and see imagery. The author has been working with two wall-size digital displays at Princeton University for five years and directing and producing IMAX films for a decade, and he has noted some unique design considerations for creating effective visual images when they are spread across entire walls. The author suggests these \"frameless\" screens - where images are so large we need to look around to see the entire field - need different ways of thinking about image design and visualization. Presenting such things as scale and detail take on new meaning when they can be displayed life-size and not shown in the context of one or many small frames such as we see everywhere. These design ideas will be of use for pervasive computing, interface research and design, interactive design, control design, representations of massive data sets, and creating effective displays of data for research and education.",
"fno": "01249002",
"keywords": [
"Computer Displays",
"Screens Display",
"Wall Size Computer Displays",
"Wall Size Digital Displays",
"Visual Space",
"IMAX Films",
"Visual Images",
"Frameless Screens",
"Image Design",
"Image Visualization",
"Pervasive Computing",
"Interface Research",
"Interface Design",
"Interactive Design",
"Control Design",
"Data Set Representations",
"Data Displays",
"Display Walls",
"Tiled Displays",
"Visual Interface",
"Graphic Design",
"High Resolution Displays",
"Computer Displays",
"Production",
"Education",
"Large Screen Displays",
"Pervasive Computing",
"Control Design",
"Graphics",
"Data Visualization",
"Educational Institutions",
"Computer Science"
],
"authors": [
{
"affiliation": "Princeton Univ., NJ, USA",
"fullName": "B. Shedd",
"givenName": "B.",
"surname": "Shedd",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "infvis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2003-01-01T00:00:00",
"pubType": "proceedings",
"pages": "7",
"year": "2003",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01249000",
"articleId": "12OmNrJiCXO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01249011",
"articleId": "12OmNx5Yvad",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/nbis/2009/3767/0/3767a612",
"title": "Study on Realistic Communication Technology with Tiled Displays Wall",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2009/3767a612/12OmNqBKUf2",
"parentPublication": {
"id": "proceedings/nbis/2009/3767/0",
"title": "2009 International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2009/3639/0/3639a554",
"title": "An Experiment on Tele-immersive Communication with Tiled Displays Wall over JGN2plus Network",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2009/3639a554/12OmNqJ8tuG",
"parentPublication": {
"id": "proceedings/waina/2009/3639/0",
"title": "2009 International Conference on Advanced Information Networking and Applications Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/caapwd/1992/2730/0/00217374",
"title": "Twenty-column visual displays",
"doi": null,
"abstractUrl": "/proceedings-article/caapwd/1992/00217374/12OmNqzcvJP",
"parentPublication": {
"id": "proceedings/caapwd/1992/2730/0",
"title": "Proceedings of the Johns Hopkins National Search for Computing Applications to Assist Persons with Disabilities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2003/2055/0/20550032",
"title": "Exploding the Frame: Designing for Wall-Size Computer Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2003/20550032/12OmNxHryk6",
"parentPublication": {
"id": "proceedings/ieee-infovis/2003/2055/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2003/2055/0/01249002",
"title": "Exploding the frame: designing for wall-size computer displays",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2003/01249002/12OmNy50ggl",
"parentPublication": {
"id": "proceedings/ieee-infovis/2003/2055/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811007",
"title": "Measurement Protocols for Medium-Field Distance Perception in Large-Screen Immersive Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811007/12OmNyeWdKg",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811033",
"title": "Creating Virtual 3D See-Through Experiences on Large-size 2D Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811033/12OmNzZEAqd",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122516",
"title": "Perception of Visual Variables on Tiled Wall-Sized Displays for Information Visualization Applications",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122516/13rRUwwJWFM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2006/04/r4096",
"title": "Interacting with Large Displays",
"doi": null,
"abstractUrl": "/magazine/co/2006/04/r4096/13rRUxjyX7d",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a011",
"title": "Collaborative Visual Analysis with Multi-level Information Sharing Using a Wall-Size Display and See-Through HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a011/1cMF7IJ33Lq",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAndiiX",
"title": "2015 IEEE 1st Workshop on Everyday Virtual Reality (WEVR)",
"acronym": "wevr",
"groupId": "1807824",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs0C9Ln",
"doi": "10.1109/WEVR.2015.7151687",
"title": "Interactive display conglomeration on the wall",
"normalizedTitle": "Interactive display conglomeration on the wall",
"abstract": "We present a work in progress for a paradigm of wall-top displays for future offices, where instead of a small desktop, we treat the available walls as the desktop. Multiple projector-camera units, mounted on pan-tilt units (PTU), allow for the creation of the conglomeration of one or more high resolution displays, whose position, size, and aspect ratio can be changed by the user. This can be achieved by lucid gesture based interactions. Multiple wireless keyboard and mouse can be used to interact with the display(s) for shared collaborative or personal individual interactions. The system can also be extended to support stereoscopic projection and data input by superimposing projection displays and processing data from multiple cameras. This is achieved by a distributed network of projector-camera systems, and associated distributed registration and interaction methodologies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a work in progress for a paradigm of wall-top displays for future offices, where instead of a small desktop, we treat the available walls as the desktop. Multiple projector-camera units, mounted on pan-tilt units (PTU), allow for the creation of the conglomeration of one or more high resolution displays, whose position, size, and aspect ratio can be changed by the user. This can be achieved by lucid gesture based interactions. Multiple wireless keyboard and mouse can be used to interact with the display(s) for shared collaborative or personal individual interactions. The system can also be extended to support stereoscopic projection and data input by superimposing projection displays and processing data from multiple cameras. This is achieved by a distributed network of projector-camera systems, and associated distributed registration and interaction methodologies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a work in progress for a paradigm of wall-top displays for future offices, where instead of a small desktop, we treat the available walls as the desktop. Multiple projector-camera units, mounted on pan-tilt units (PTU), allow for the creation of the conglomeration of one or more high resolution displays, whose position, size, and aspect ratio can be changed by the user. This can be achieved by lucid gesture based interactions. Multiple wireless keyboard and mouse can be used to interact with the display(s) for shared collaborative or personal individual interactions. The system can also be extended to support stereoscopic projection and data input by superimposing projection displays and processing data from multiple cameras. This is achieved by a distributed network of projector-camera systems, and associated distributed registration and interaction methodologies.",
"fno": "07151687",
"keywords": [
"Cameras",
"Computers",
"Stereo Image Processing",
"Three Dimensional Displays",
"Keyboards",
"Collaboration",
"Visualization",
"D 4 7 Organization And Design Systems Distributed System"
],
"authors": [
{
"affiliation": "University of California, Irvine",
"fullName": "Duy-Quoc Lai",
"givenName": "Duy-Quoc",
"surname": "Lai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California, Irvine",
"fullName": "Aditi Majumder",
"givenName": "Aditi",
"surname": "Majumder",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wevr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "5-9",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-1725-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07151686",
"articleId": "12OmNzGDsMi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07151688",
"articleId": "12OmNqBtiGS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icig/2004/2244/0/01410480",
"title": "A survey of multi-projector tiled display wall construction",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410480/12OmNAWH9up",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926707",
"title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2006/0224/0/02240281",
"title": "A Foveal Inset for Large Display Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/02240281/12OmNrkT7BT",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2005/2419/0/01550778",
"title": "Exploring interaction with a simulated wrist-worn projection display",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2005/01550778/12OmNwErpUu",
"parentPublication": {
"id": "proceedings/iswc/2005/2419/0",
"title": "Ninth IEEE International Symposium on Wearable Computers (ISWC'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a449",
"title": "Projection Center Calibration for a Co-located Projector Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2014/6854/0/6854a037",
"title": "Real-Time Light Field Rendering for Multi-projector 3D Display",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2014/6854a037/12OmNzAohTG",
"parentPublication": {
"id": "proceedings/icvrv/2014/6854/0",
"title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223434",
"title": "A multi-projector display system of arbitrary shape, size and resolution",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223434/12OmNzYNNiY",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446433",
"title": "A Calibration Method for Large-Scale Projection Based Floor Display System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446433/13bd1gJ1v0M",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2000/04/mcg2000040022",
"title": "Tutorial: Introduction to Building Projection-based Tiled Display Systems",
"doi": null,
"abstractUrl": "/magazine/cg/2000/04/mcg2000040022/13rRUwbJCZd",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/03/v0290",
"title": "Camera-Based Detection and Removal of Shadows from Interactive Multiprojector Displays",
"doi": null,
"abstractUrl": "/journal/tg/2004/03/v0290/13rRUxZRbnR",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzuIjfJ",
"title": "2015 IEEE Blocks and Beyond Workshop (Blocks and Beyond)",
"acronym": "blocks-and-beyond",
"groupId": "1810644",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx8Ousc",
"doi": "10.1109/BLOCKS.2015.7369005",
"title": "Position paper: Towards making block-based programming accessible for blind users",
"normalizedTitle": "Position paper: Towards making block-based programming accessible for blind users",
"abstract": "Block-based programming environments are not accessible to users who are visually impaired. The lack of access impacts students who are participating in computing outreach, in the classroom, or in informal settings that foster interest in computing. This paper will discuss accessibility design issues in block-based programming environments, as well as present research questions and current design revisions being undertaken in Blockly.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Block-based programming environments are not accessible to users who are visually impaired. The lack of access impacts students who are participating in computing outreach, in the classroom, or in informal settings that foster interest in computing. This paper will discuss accessibility design issues in block-based programming environments, as well as present research questions and current design revisions being undertaken in Blockly.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Block-based programming environments are not accessible to users who are visually impaired. The lack of access impacts students who are participating in computing outreach, in the classroom, or in informal settings that foster interest in computing. This paper will discuss accessibility design issues in block-based programming environments, as well as present research questions and current design revisions being undertaken in Blockly.",
"fno": "07369005",
"keywords": [
"Keyboards",
"Navigation",
"Mice",
"Programming Environments",
"Visualization",
"Programming",
"Software",
"Visually Impaired",
"Accessibility",
"Aria",
"User Interface Design"
],
"authors": [
{
"affiliation": "Department of Software Engineering, Rochester Institute of Technology, Rochester, USA",
"fullName": "Stephanie Ludi",
"givenName": "Stephanie",
"surname": "Ludi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "blocks-and-beyond",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-10-01T00:00:00",
"pubType": "proceedings",
"pages": "67-69",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-8367-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07369004",
"articleId": "12OmNxGj9Qc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07369006",
"articleId": "12OmNCm7BIZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/b&b/2017/2480/0/08120404",
"title": "Tips for creating a block language with blockly",
"doi": null,
"abstractUrl": "/proceedings-article/b&b/2017/08120404/12OmNAlvHy3",
"parentPublication": {
"id": "proceedings/b&b/2017/2480/0",
"title": "2017 IEEE Blocks and Beyond Workshop (B&B)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/blocks-and-beyond/2015/8367/0/07369015",
"title": "Programming environments for blocks need first-class software refactoring support: A position paper",
"doi": null,
"abstractUrl": "/proceedings-article/blocks-and-beyond/2015/07369015/12OmNBaBuQY",
"parentPublication": {
"id": "proceedings/blocks-and-beyond/2015/8367/0",
"title": "2015 IEEE Blocks and Beyond Workshop (Blocks and Beyond)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/blocks-and-beyond/2015/8367/0/07369000",
"title": "Ten things we've learned from Blockly",
"doi": null,
"abstractUrl": "/proceedings-article/blocks-and-beyond/2015/07369000/12OmNs4S8LG",
"parentPublication": {
"id": "proceedings/blocks-and-beyond/2015/8367/0",
"title": "2015 IEEE Blocks and Beyond Workshop (Blocks and Beyond)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2015/7334/0/7334a413",
"title": "Alternative Concepts for Accessible Virtual Classrooms for Blind Users",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2015/7334a413/12OmNwM6A3H",
"parentPublication": {
"id": "proceedings/icalt/2015/7334/0",
"title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/blocks-and-beyond/2015/8367/0/07369003",
"title": "Position paper: Lack of keyboard support cripples block-based programming",
"doi": null,
"abstractUrl": "/proceedings-article/blocks-and-beyond/2015/07369003/12OmNwfsI0L",
"parentPublication": {
"id": "proceedings/blocks-and-beyond/2015/8367/0",
"title": "2015 IEEE Blocks and Beyond Workshop (Blocks and Beyond)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/blocks-and-beyond/2015/8367/0/07369009",
"title": "Position paper: From interest to usefulness with BlockPy, a block-based, educational environment",
"doi": null,
"abstractUrl": "/proceedings-article/blocks-and-beyond/2015/07369009/12OmNxcdG1T",
"parentPublication": {
"id": "proceedings/blocks-and-beyond/2015/8367/0",
"title": "2015 IEEE Blocks and Beyond Workshop (Blocks and Beyond)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/b&b/2017/2480/0/08120414",
"title": "Position paper: Block-based programming should offer intelligent support for learners",
"doi": null,
"abstractUrl": "/proceedings-article/b&b/2017/08120414/12OmNznkJWt",
"parentPublication": {
"id": "proceedings/b&b/2017/2480/0",
"title": "2017 IEEE Blocks and Beyond Workshop (B&B)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2018/4235/0/08506557",
"title": "Evaluation of A Visual Programming Keyboard on Touchscreen Devices",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2018/08506557/17D45We0UER",
"parentPublication": {
"id": "proceedings/vlhcc/2018/4235/0",
"title": "2018 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/b&b/2019/4849/0/08941230",
"title": "Position: Accessible Block-Based Programming: Why and How",
"doi": null,
"abstractUrl": "/proceedings-article/b&b/2019/08941230/1gjRlLwoTkI",
"parentPublication": {
"id": "proceedings/b&b/2019/4849/0",
"title": "2019 IEEE Blocks and Beyond Workshop (B&B)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/b&b/2019/4849/0/08941222",
"title": "Position: IntelliBlox: A Toolkit for Integrating Block-Based Programming into Game-Based Learning Environments",
"doi": null,
"abstractUrl": "/proceedings-article/b&b/2019/08941222/1gjRlj6f5Qc",
"parentPublication": {
"id": "proceedings/b&b/2019/4849/0",
"title": "2019 IEEE Blocks and Beyond Workshop (B&B)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBUAvUQ",
"title": "Information Visualization, IEEE Symposium on",
"acronym": "ieee-infovis",
"groupId": "1000371",
"volume": "0",
"displayVolume": "0",
"year": "2003",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy50ggl",
"doi": "10.1109/INFVIS.2003.1249002",
"title": "Exploding the frame: designing for wall-size computer displays",
"normalizedTitle": "Exploding the frame: designing for wall-size computer displays",
"abstract": "High-resolution wall-size digital displays present significant new and different visual space to show and see imagery. The author has been working with two wall-size digital displays at Princeton University for five years and directing and producing IMAX films for a decade, and he has noted some unique design considerations for creating effective visual images when they are spread across entire walls. The author suggests these \"frameless\" screens - where images are so large we need to look around to see the entire field - need different ways of thinking about image design and visualization. Presenting such things as scale and detail take on new meaning when they can be displayed life-size and not shown in the context of one or many small frames such as we see everywhere. These design ideas will be of use for pervasive computing, interface research and design, interactive design, control design, representations of massive data sets, and creating effective displays of data for research and education.",
"abstracts": [
{
"abstractType": "Regular",
"content": "High-resolution wall-size digital displays present significant new and different visual space to show and see imagery. The author has been working with two wall-size digital displays at Princeton University for five years and directing and producing IMAX films for a decade, and he has noted some unique design considerations for creating effective visual images when they are spread across entire walls. The author suggests these \"frameless\" screens - where images are so large we need to look around to see the entire field - need different ways of thinking about image design and visualization. Presenting such things as scale and detail take on new meaning when they can be displayed life-size and not shown in the context of one or many small frames such as we see everywhere. These design ideas will be of use for pervasive computing, interface research and design, interactive design, control design, representations of massive data sets, and creating effective displays of data for research and education.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "High-resolution wall-size digital displays present significant new and different visual space to show and see imagery. The author has been working with two wall-size digital displays at Princeton University for five years and directing and producing IMAX films for a decade, and he has noted some unique design considerations for creating effective visual images when they are spread across entire walls. The author suggests these \"frameless\" screens - where images are so large we need to look around to see the entire field - need different ways of thinking about image design and visualization. Presenting such things as scale and detail take on new meaning when they can be displayed life-size and not shown in the context of one or many small frames such as we see everywhere. These design ideas will be of use for pervasive computing, interface research and design, interactive design, control design, representations of massive data sets, and creating effective displays of data for research and education.",
"fno": "01249002",
"keywords": [
"Computer Displays",
"Screens Display",
"Wall Size Computer Displays",
"Wall Size Digital Displays",
"Visual Space",
"IMAX Films",
"Visual Images",
"Frameless Screens",
"Image Design",
"Image Visualization",
"Pervasive Computing",
"Interface Research",
"Interface Design",
"Interactive Design",
"Control Design",
"Data Set Representations",
"Data Displays",
"Display Walls",
"Tiled Displays",
"Visual Interface",
"Graphic Design",
"High Resolution Displays",
"Computer Displays",
"Production",
"Education",
"Large Screen Displays",
"Pervasive Computing",
"Control Design",
"Graphics",
"Data Visualization",
"Educational Institutions",
"Computer Science"
],
"authors": [
{
"affiliation": "Princeton Univ., NJ, USA",
"fullName": "B. Shedd",
"givenName": "B.",
"surname": "Shedd",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-infovis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2003-01-01T00:00:00",
"pubType": "proceedings",
"pages": "7",
"year": "2003",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01249000",
"articleId": "12OmNzT7Opi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01249015",
"articleId": "18M76LncjTO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/nbis/2009/3767/0/3767a612",
"title": "Study on Realistic Communication Technology with Tiled Displays Wall",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2009/3767a612/12OmNqBKUf2",
"parentPublication": {
"id": "proceedings/nbis/2009/3767/0",
"title": "2009 International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2009/3639/0/3639a554",
"title": "An Experiment on Tele-immersive Communication with Tiled Displays Wall over JGN2plus Network",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2009/3639a554/12OmNqJ8tuG",
"parentPublication": {
"id": "proceedings/waina/2009/3639/0",
"title": "2009 International Conference on Advanced Information Networking and Applications Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/caapwd/1992/2730/0/00217374",
"title": "Twenty-column visual displays",
"doi": null,
"abstractUrl": "/proceedings-article/caapwd/1992/00217374/12OmNqzcvJP",
"parentPublication": {
"id": "proceedings/caapwd/1992/2730/0",
"title": "Proceedings of the Johns Hopkins National Search for Computing Applications to Assist Persons with Disabilities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/infvis/2003/8154/0/01249002",
"title": "Exploding the frame: designing for wall-size computer displays",
"doi": null,
"abstractUrl": "/proceedings-article/infvis/2003/01249002/12OmNrYCXId",
"parentPublication": {
"id": "proceedings/infvis/2003/8154/0",
"title": "IEEE Symposium on Information Visualization 2003",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2003/2055/0/20550032",
"title": "Exploding the Frame: Designing for Wall-Size Computer Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2003/20550032/12OmNxHryk6",
"parentPublication": {
"id": "proceedings/ieee-infovis/2003/2055/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811007",
"title": "Measurement Protocols for Medium-Field Distance Perception in Large-Screen Immersive Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811007/12OmNyeWdKg",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811033",
"title": "Creating Virtual 3D See-Through Experiences on Large-size 2D Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811033/12OmNzZEAqd",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122516",
"title": "Perception of Visual Variables on Tiled Wall-Sized Displays for Information Visualization Applications",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122516/13rRUwwJWFM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2006/04/r4096",
"title": "Interacting with Large Displays",
"doi": null,
"abstractUrl": "/magazine/co/2006/04/r4096/13rRUxjyX7d",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a011",
"title": "Collaborative Visual Analysis with Multi-level Information Sharing Using a Wall-Size Display and See-Through HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a011/1cMF7IJ33Lq",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyr8Ytt",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzgwmQF",
"doi": "10.1109/iV.2015.70",
"title": "A Gesture Control Framework Targeting High-Resolution Video Wall Displays",
"normalizedTitle": "A Gesture Control Framework Targeting High-Resolution Video Wall Displays",
"abstract": "This recent advances in display and user interface technologies create new opportunities for participatory urban planning. Whereas touch screens enable direct screen interactions for intuitive remodeling of urban simulations, high resolution video walls offer true-to-life or human-scale scenario visualizations. Multi-touch screen environments finally function as an interactive user interface for the entire urban simulation pipeline. However intuitive and preferably natural interactions for simple reconfiguration, overarching tool management or complex tool interaction remain a major challenge. For this reason we have implemented a gesture recognition framework that uses multimodal input devices to satisfy all gesture use cases and integrates a display management system for seamless interaction with our multi-screen infrastructure. To understand the implications of this novel approach, we conducted preliminary user trials with urban planning experts and technicians. The results indicated that the system provides a fluid and natural user experience for exploring and analyzing urban planning data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This recent advances in display and user interface technologies create new opportunities for participatory urban planning. Whereas touch screens enable direct screen interactions for intuitive remodeling of urban simulations, high resolution video walls offer true-to-life or human-scale scenario visualizations. Multi-touch screen environments finally function as an interactive user interface for the entire urban simulation pipeline. However intuitive and preferably natural interactions for simple reconfiguration, overarching tool management or complex tool interaction remain a major challenge. For this reason we have implemented a gesture recognition framework that uses multimodal input devices to satisfy all gesture use cases and integrates a display management system for seamless interaction with our multi-screen infrastructure. To understand the implications of this novel approach, we conducted preliminary user trials with urban planning experts and technicians. The results indicated that the system provides a fluid and natural user experience for exploring and analyzing urban planning data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This recent advances in display and user interface technologies create new opportunities for participatory urban planning. Whereas touch screens enable direct screen interactions for intuitive remodeling of urban simulations, high resolution video walls offer true-to-life or human-scale scenario visualizations. Multi-touch screen environments finally function as an interactive user interface for the entire urban simulation pipeline. However intuitive and preferably natural interactions for simple reconfiguration, overarching tool management or complex tool interaction remain a major challenge. For this reason we have implemented a gesture recognition framework that uses multimodal input devices to satisfy all gesture use cases and integrates a display management system for seamless interaction with our multi-screen infrastructure. To understand the implications of this novel approach, we conducted preliminary user trials with urban planning experts and technicians. The results indicated that the system provides a fluid and natural user experience for exploring and analyzing urban planning data.",
"fno": "7568a366",
"keywords": [
"Gesture Recognition",
"Mice",
"Visualization",
"Streaming Media",
"Navigation",
"Tablet Computers",
"Urban Planning",
"Wearable Sensor Armband",
"Human Computer Interaction",
"Natural Interaction",
"Gesture Recognition"
],
"authors": [
{
"affiliation": null,
"fullName": "Bernhard Klein",
"givenName": "Bernhard",
"surname": "Klein",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-07-01T00:00:00",
"pubType": "proceedings",
"pages": "366-371",
"year": "2015",
"issn": "1550-6037",
"isbn": "978-1-4673-7568-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7568a360",
"articleId": "12OmNApcugV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7568a372",
"articleId": "12OmNqGRGor",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vlhcc/2016/0252/0/07739698",
"title": "Tool demo: Operating diagram editors through unistroke gestures",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2016/07739698/12OmNBr4eL0",
"parentPublication": {
"id": "proceedings/vlhcc/2016/0252/0",
"title": "2016 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2016/4155/0/4155a117",
"title": "Application Interface Structure Research Based on Touch Screen",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2016/4155a117/12OmNqGA5aG",
"parentPublication": {
"id": "proceedings/icris/2016/4155/0",
"title": "2016 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csi/2016/4871/0/07916962",
"title": "Integration of Hand Gesture and Multi Touch Gesture with Glove Type Device",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csi/2016/07916962/12OmNrGsDoo",
"parentPublication": {
"id": "proceedings/acit-csi/2016/4871/0",
"title": "2016 4th Intl. Conf. on Applied Computing and Information Technology (ACIT), 3rd Intl. Conf. on Computational Science/Intelligence and Applied Informatics (CSII), and 1st Intl. Conf. on Big Data, Cloud Computing, Data Science & Engineering (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cts/2016/2300/0/07871004",
"title": "Sketching Gesture-Based Applications in a Collaborative Working Environment with Wall-Sized Displays",
"doi": null,
"abstractUrl": "/proceedings-article/cts/2016/07871004/12OmNxbEtOu",
"parentPublication": {
"id": "proceedings/cts/2016/2300/0",
"title": "2016 International Conference on Collaboration Technologies and Systems (CTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfhr/2014/4335/0/06981030",
"title": "A Graph Modeling Strategy for Multi-touch Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icfhr/2014/06981030/12OmNyQpha3",
"parentPublication": {
"id": "proceedings/icfhr/2014/4335/0",
"title": "2014 14th International Conference on Frontiers in Handwriting Recognition (ICFHR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/socpar/2009/3879/0/3879a592",
"title": "A Simple Wearable Hand Gesture Recognition Device Using iMEMS",
"doi": null,
"abstractUrl": "/proceedings-article/socpar/2009/3879a592/12OmNykCccE",
"parentPublication": {
"id": "proceedings/socpar/2009/3879/0",
"title": "Soft Computing and Pattern Recognition, International Conference of",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607588",
"title": "Effective hand segmentation and gesture recognition for browsing web pages on a large screen",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607588/12OmNyr8Yhe",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07516722",
"title": "Evaluating Multi-User Selection for Exploring Graph Topology on Wall-Displays",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07516722/13rRUwh80uD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2017/0443/0/08103485",
"title": "Approach to gesture-based editing of diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2017/08103485/17D45Xq6dC1",
"parentPublication": {
"id": "proceedings/vlhcc/2017/0443/0",
"title": "2017 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0/148500b136",
"title": "Predicting Sex and Age using Swipe-Gesture Data from a Mobile Device",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2020/148500b136/1ua4FhHhZbq",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0",
"title": "2020 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKipK",
"title": "2017 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"acronym": "vlhcc",
"groupId": "1001007",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45XDIXT3",
"doi": "10.1109/VLHCC.2017.8103479",
"title": "Investigating uni-stroke gesture input for diagram editors on large wall-mounted touchscreens",
"normalizedTitle": "Investigating uni-stroke gesture input for diagram editors on large wall-mounted touchscreens",
"abstract": "Nowadays, touch-input devices are widely available. The use of such touch input devices, e.g., large wall-mounted touchscreens in (team) meeting rooms appear appropriate and desirable. Thus, team meetings can change from one-man-presentation-shows towards collaborative and interactive developing of plans and processes via designing and creating its corresponding diagrams. In this paper, we present an approach for interacting with diagrams using uni-stroke touch gestures. We focus on large wall-mounted touchscreens and present an editor design for such environments. In order to validate the usability of our approach we report on the results of a user study with a diagram editor for Business Process Modeling Networks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Nowadays, touch-input devices are widely available. The use of such touch input devices, e.g., large wall-mounted touchscreens in (team) meeting rooms appear appropriate and desirable. Thus, team meetings can change from one-man-presentation-shows towards collaborative and interactive developing of plans and processes via designing and creating its corresponding diagrams. In this paper, we present an approach for interacting with diagrams using uni-stroke touch gestures. We focus on large wall-mounted touchscreens and present an editor design for such environments. In order to validate the usability of our approach we report on the results of a user study with a diagram editor for Business Process Modeling Networks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Nowadays, touch-input devices are widely available. The use of such touch input devices, e.g., large wall-mounted touchscreens in (team) meeting rooms appear appropriate and desirable. Thus, team meetings can change from one-man-presentation-shows towards collaborative and interactive developing of plans and processes via designing and creating its corresponding diagrams. In this paper, we present an approach for interacting with diagrams using uni-stroke touch gestures. We focus on large wall-mounted touchscreens and present an editor design for such environments. In order to validate the usability of our approach we report on the results of a user study with a diagram editor for Business Process Modeling Networks.",
"fno": "08103479",
"keywords": [
"Visualization",
"Mice",
"Keyboards",
"Prototypes",
"Collaboration",
"Business",
"Diagram Editors",
"Single Touch Interaction",
"Unistroke Gestures"
],
"authors": [
{
"affiliation": "Universität der Bundeswehr München, Germany, Neubiberg, Germany",
"fullName": "Christian Schenk",
"givenName": "Christian",
"surname": "Schenk",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität der Bundeswehr München, Germany, Neubiberg, Germany",
"fullName": "Sonja Schimmler",
"givenName": "Sonja",
"surname": "Schimmler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität der Bundeswehr München, Germany, Neubiberg, Germany",
"fullName": "Mark Minas",
"givenName": "Mark",
"surname": "Minas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vlhcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "279-283",
"year": "2017",
"issn": "1943-6106",
"isbn": "978-1-5386-0443-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08103478",
"articleId": "17D45Wc1ILW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08103480",
"articleId": "17D45WB0qby",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vlhcc/2016/0252/0/07739698",
"title": "Tool demo: Operating diagram editors through unistroke gestures",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2016/07739698/12OmNBr4eL0",
"parentPublication": {
"id": "proceedings/vlhcc/2016/0252/0",
"title": "2016 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2015/1725/0/07151687",
"title": "Interactive display conglomeration on the wall",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2015/07151687/12OmNs0C9Ln",
"parentPublication": {
"id": "proceedings/wevr/2015/1725/0",
"title": "2015 IEEE 1st Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2016/0252/0/07739659",
"title": "Operating diagram editors through unistroke gestures",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2016/07739659/12OmNyNQSOK",
"parentPublication": {
"id": "proceedings/vlhcc/2016/0252/0",
"title": "2016 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a079",
"title": "[POSTER] Mutually Shared Gaze in Augmented Video Conference",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a079/12OmNyQYt9o",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/b&b/2017/2480/0/08120412",
"title": "Towards collaborative block-based programming on digital tabletops",
"doi": null,
"abstractUrl": "/proceedings-article/b&b/2017/08120412/12OmNzG4gyS",
"parentPublication": {
"id": "proceedings/b&b/2017/2480/0",
"title": "2017 IEEE Blocks and Beyond Workshop (B&B)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tabletop/2007/3013/0/04384112",
"title": "Living with a Tabletop: Analysis and Observations of Long Term Office Use of a Multi-Touch Table",
"doi": null,
"abstractUrl": "/proceedings-article/tabletop/2007/04384112/12OmNzy7uRs",
"parentPublication": {
"id": "proceedings/tabletop/2007/3013/0",
"title": "Horizontal Interactive Human-Computer Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07516722",
"title": "Evaluating Multi-User Selection for Exploring Graph Topology on Wall-Displays",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07516722/13rRUwh80uD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08263123",
"title": "MRTouch: Adding Touch Input to Head-Mounted Mixed Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08263123/13rRUyft7D9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2017/0443/0/08103485",
"title": "Approach to gesture-based editing of diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2017/08103485/17D45Xq6dC1",
"parentPublication": {
"id": "proceedings/vlhcc/2017/0443/0",
"title": "2017 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089533",
"title": "HiPad: Text entry for Head-Mounted Displays Using Circular Touchpad",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089533/1jIx7JtSOTC",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzVXNJh",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBCqbId",
"doi": "10.1109/3DUI.2015.7131725",
"title": "LazyNav: 3D ground navigation with non-critical body parts",
"normalizedTitle": "LazyNav: 3D ground navigation with non-critical body parts",
"abstract": "With the growing interest in natural input devices and virtual reality, mid-air ground navigation is becoming a fundamental interaction for a large collection of application scenarios. While classical input devices (e.g., mouse/keyboard, gamepad, touchscreen) have their own ground navigation standards, natural input techniques still lack acknowledged mechanisms for travelling in a 3D scene. In particular, for most applications, navigation is not the primary interaction. Thus, the user should navigate in the scene while still being able to perform other interactions with her hands, and observe the displayed content by moving her eyes and locally rotating her head. Since most ground navigation scenarios require only two degrees of freedom to move forward or backward and rotate the view to the left or to the right, we propose LazyNav a mid-air ground navigation control model which lets the users hands, eyes or local head orientation completely free, making use of a single pair of the remaining tracked body elements to tailor the navigation. To this end, we design several navigation body motions and study their desired properties, such as being easy to discover, easy to control, socially acceptable, accurate and not tiring. We also develop several assumptions about motions design for ground navigation and evaluate them. Finally, we highlight general advices on mid-air ground navigation techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the growing interest in natural input devices and virtual reality, mid-air ground navigation is becoming a fundamental interaction for a large collection of application scenarios. While classical input devices (e.g., mouse/keyboard, gamepad, touchscreen) have their own ground navigation standards, natural input techniques still lack acknowledged mechanisms for travelling in a 3D scene. In particular, for most applications, navigation is not the primary interaction. Thus, the user should navigate in the scene while still being able to perform other interactions with her hands, and observe the displayed content by moving her eyes and locally rotating her head. Since most ground navigation scenarios require only two degrees of freedom to move forward or backward and rotate the view to the left or to the right, we propose LazyNav a mid-air ground navigation control model which lets the users hands, eyes or local head orientation completely free, making use of a single pair of the remaining tracked body elements to tailor the navigation. To this end, we design several navigation body motions and study their desired properties, such as being easy to discover, easy to control, socially acceptable, accurate and not tiring. We also develop several assumptions about motions design for ground navigation and evaluate them. Finally, we highlight general advices on mid-air ground navigation techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the growing interest in natural input devices and virtual reality, mid-air ground navigation is becoming a fundamental interaction for a large collection of application scenarios. While classical input devices (e.g., mouse/keyboard, gamepad, touchscreen) have their own ground navigation standards, natural input techniques still lack acknowledged mechanisms for travelling in a 3D scene. In particular, for most applications, navigation is not the primary interaction. Thus, the user should navigate in the scene while still being able to perform other interactions with her hands, and observe the displayed content by moving her eyes and locally rotating her head. Since most ground navigation scenarios require only two degrees of freedom to move forward or backward and rotate the view to the left or to the right, we propose LazyNav a mid-air ground navigation control model which lets the users hands, eyes or local head orientation completely free, making use of a single pair of the remaining tracked body elements to tailor the navigation. To this end, we design several navigation body motions and study their desired properties, such as being easy to discover, easy to control, socially acceptable, accurate and not tiring. We also develop several assumptions about motions design for ground navigation and evaluate them. Finally, we highlight general advices on mid-air ground navigation techniques.",
"fno": "07131725",
"keywords": [
"Navigation",
"Hip",
"Knee",
"Three Dimensional Displays",
"Tracking",
"Legged Locomotion",
"Cameras"
],
"authors": [
{
"affiliation": "Telecom ParisTech - CNRS - Institut Mines-Telecom, France",
"fullName": "Emilie Guy",
"givenName": "Emilie",
"surname": "Guy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University, Japan",
"fullName": "Parinya Punpongsanon",
"givenName": "Parinya",
"surname": "Punpongsanon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University, Japan",
"fullName": "Daisuke Iwai",
"givenName": "Daisuke",
"surname": "Iwai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University, Japan",
"fullName": "Kosuke Sato",
"givenName": "Kosuke",
"surname": "Sato",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Telecom ParisTech - CNRS - Institut Mines-Telecom, France",
"fullName": "Tamy Boubekeur",
"givenName": "Tamy",
"surname": "Boubekeur",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "43-50",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-6886-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07131724",
"articleId": "12OmNCzsKHZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07131726",
"articleId": "12OmNARiM2c",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2014/3624/0/06798845",
"title": "Feet movement in desktop 3D interaction",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798845/12OmNqC2uZJ",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459327",
"title": "Body-relative navigation guidance using uncalibrated cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459327/12OmNvJXezB",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2016/9041/0/9041a332",
"title": "Gesture-Based Learning for Preschooler: A Case Study of Teaching English Alphabet and Body Parts Vocabulary",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2016/9041a332/12OmNvo67Ac",
"parentPublication": {
"id": "proceedings/icalt/2016/9041/0",
"title": "2016 IEEE 16th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eurosim/2013/5073/0/5073a226",
"title": "Multi-legged Walking Robot Modelling in MATLAB/SimmechanicsTM and Its Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/eurosim/2013/5073a226/12OmNyFU7bV",
"parentPublication": {
"id": "proceedings/eurosim/2013/5073/0",
"title": "2013 8th EUROSIM Congress on Modelling and Simulation (EUROSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07501805",
"title": "Extended LazyNav: Virtual 3D Ground Navigation for Large Displays and Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07501805/13rRUyuegpb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2019/9245/0/924500a288",
"title": "A Bio-Inspired Musculoskeletal Model of the Lower Limb for Energy Economical Bipedal Walking",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2019/924500a288/18M7ixxaQr6",
"parentPublication": {
"id": "proceedings/irc/2019/9245/0",
"title": "2019 Third IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09761724",
"title": "Effects of Transfer Functions and Body Parts on Body-centric Locomotion in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09761724/1CKMkLCKOSk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900e620",
"title": "Coupling Vision and Proprioception for Navigation of Legged Robots",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900e620/1G56AR1YWqY",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900b894",
"title": "Coupling Vision and Proprioception for Navigation of Legged Robots",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900b894/1G56YFpGPNS",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600r7252",
"title": "Coupling Vision and Proprioception for Navigation of Legged Robots",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600r7252/1H0NRjnpTpe",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzahbRg",
"title": "2016 IEEE 18th International Conference on High-Performance Computing and Communications, IEEE 14th International Conference on Smart City, and IEEE 2nd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"acronym": "hpcc",
"groupId": "1002461",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNApLGsF",
"doi": "10.1109/HPCC-SmartCity-DSS.2016.0185",
"title": "Registration of Low Cost Maps within Large Scale MMS Maps",
"normalizedTitle": "Registration of Low Cost Maps within Large Scale MMS Maps",
"abstract": "High resolution 3D mapping of road systems is currently being carried out by expensive Mobile Mapping Systems (MMS) but coverage is limited. Recently Low Cost Sensor (LCS) systems have been developed which use common, low cost, internal MEMS position sensors from mobile phones, but such sensors come with a reduced absolute and relative positional accuracy. This study investigates the registration of LCS maps within MMS maps to improve map coverage and lower costs. MMS and LCS maps of a real world environment are made and registration is performed using feature matching and Iterative Closest Point alignment. Accuracy of ICP alignment is approximately (10cm) and local convergence is possible up to (1m). A combination of feature matching and ICP is used to demonstrate accurate alignment from an initial error of (10m). An example of a LCS map aligned within a MMS map is presented to confirm the use of LCS systems to extend 3D mapping coverage.",
"abstracts": [
{
"abstractType": "Regular",
"content": "High resolution 3D mapping of road systems is currently being carried out by expensive Mobile Mapping Systems (MMS) but coverage is limited. Recently Low Cost Sensor (LCS) systems have been developed which use common, low cost, internal MEMS position sensors from mobile phones, but such sensors come with a reduced absolute and relative positional accuracy. This study investigates the registration of LCS maps within MMS maps to improve map coverage and lower costs. MMS and LCS maps of a real world environment are made and registration is performed using feature matching and Iterative Closest Point alignment. Accuracy of ICP alignment is approximately (10cm) and local convergence is possible up to (1m). A combination of feature matching and ICP is used to demonstrate accurate alignment from an initial error of (10m). An example of a LCS map aligned within a MMS map is presented to confirm the use of LCS systems to extend 3D mapping coverage.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "High resolution 3D mapping of road systems is currently being carried out by expensive Mobile Mapping Systems (MMS) but coverage is limited. Recently Low Cost Sensor (LCS) systems have been developed which use common, low cost, internal MEMS position sensors from mobile phones, but such sensors come with a reduced absolute and relative positional accuracy. This study investigates the registration of LCS maps within MMS maps to improve map coverage and lower costs. MMS and LCS maps of a real world environment are made and registration is performed using feature matching and Iterative Closest Point alignment. Accuracy of ICP alignment is approximately (10cm) and local convergence is possible up to (1m). A combination of feature matching and ICP is used to demonstrate accurate alignment from an initial error of (10m). An example of a LCS map aligned within a MMS map is presented to confirm the use of LCS systems to extend 3D mapping coverage.",
"fno": "07828526",
"keywords": [
"Convergence",
"Geography",
"Image Matching",
"Image Registration",
"Iterative Methods",
"Micromechanical Devices",
"Mobile Computing",
"Roads",
"Sensors",
"Stereo Image Processing",
"Local Convergence",
"Iterative Closest Point Alignment",
"Feature Matching",
"Mobile Phones",
"Internal MEMS Position Sensors",
"LCS",
"Low Cost Sensor Systems",
"Mobile Mapping Systems",
"Road Systems",
"High Resolution 3 D Mapping",
"Large Scale MMS Maps",
"Low Cost Map Registration",
"Three Dimensional Displays",
"Iterative Closest Point Algorithm",
"Global Positioning System",
"Vehicles",
"Trajectory",
"Roads",
"Robot Sensing Systems",
"3 D Mapping",
"Autonomous Vehicles"
],
"authors": [
{
"affiliation": null,
"fullName": "Simon Thompson",
"givenName": "Simon",
"surname": "Thompson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Masashi Yokozuka",
"givenName": "Masashi",
"surname": "Yokozuka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Naohisa Hashimoto",
"givenName": "Naohisa",
"surname": "Hashimoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Osamu Matsumoto",
"givenName": "Osamu",
"surname": "Matsumoto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hpcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-12-01T00:00:00",
"pubType": "proceedings",
"pages": "1304-1311",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-4297-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07828525",
"articleId": "12OmNwvDQsE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07828527",
"articleId": "12OmNC4eSFe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/svr/2013/5001/0/06655764",
"title": "A Marker-Free Calibration and Registration Process for Multiple Depth Maps from Structured Light Sensors and its Application in Video Avatar Systems",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2013/06655764/12OmNARRYwP",
"parentPublication": {
"id": "proceedings/svr/2013/5001/0",
"title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmip/2017/5954/0/5954a058",
"title": "A Log-Polar Feature Guided Iterative Closest Point Method for Image Registration",
"doi": null,
"abstractUrl": "/proceedings-article/icmip/2017/5954a058/12OmNBvkdlJ",
"parentPublication": {
"id": "proceedings/icmip/2017/5954/0",
"title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a232",
"title": "Efficient Large-Scale Point Cloud Registration Using Loop Closures",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a232/12OmNxA3YSC",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284641",
"title": "Ocular Fundus Blood Vessel Registration using Repeated Application of the ICP Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284641/12OmNyL0TP4",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034c495",
"title": "KPPF: Keypoint-Based Point-Pair-Feature for Scalable Automatic Global Registration of Large RGB-D Scans",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c495/12OmNyXMQo9",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836512",
"title": "Motion-Aware Iterative Closest Point Estimation for Fast Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836512/12OmNyxXlxR",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460636",
"title": "Sketch-based face alignment for thermal face recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460636/12OmNzlUKPq",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/11/07368945",
"title": "Go-ICP: A Globally Optimal Solution to 3D ICP Point-Set Registration",
"doi": null,
"abstractUrl": "/journal/tp/2016/11/07368945/13rRUwfZC1L",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3249",
"title": "Provably Approximated Point Cloud Registration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3249/1BmHRpR72Cc",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2021/1732/0/173200a407",
"title": "An Improved ICP Point Cloud Registration Algorithm Based on Three-Points Congruent Sets",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2021/173200a407/1BzTJDeh3Ms",
"parentPublication": {
"id": "proceedings/aiam/2021/1732/0",
"title": "2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyQ7FQO",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"acronym": "iiai-aai",
"groupId": "1801921",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCxL9QL",
"doi": "10.1109/IIAI-AAI.2016.203",
"title": "A Map Database System for Route Navigation with Multiple Transit Points and Destination Points",
"normalizedTitle": "A Map Database System for Route Navigation with Multiple Transit Points and Destination Points",
"abstract": "This paper presents a map database system for route navigation. The system contains database describing roads, interest points (such as a bus stop, store, etc), and route images. The system also includes an index for efficient processing of the shortest path query. Given a set of candidate destination points and a set of candidate transit points, the system generates an index for shortest path query dynamically. Then, a computer system gets a starting point, and the system makes the shortest path that includes one of the candidate destination points and one of the candidate transit points. The k-shortest path (k-SPT) method is used to evaluate the shortest path query. The original version of k-SPT does not consider multiple number of destination points, then the implementation of k-SPT is modified.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a map database system for route navigation. The system contains database describing roads, interest points (such as a bus stop, store, etc), and route images. The system also includes an index for efficient processing of the shortest path query. Given a set of candidate destination points and a set of candidate transit points, the system generates an index for shortest path query dynamically. Then, a computer system gets a starting point, and the system makes the shortest path that includes one of the candidate destination points and one of the candidate transit points. The k-shortest path (k-SPT) method is used to evaluate the shortest path query. The original version of k-SPT does not consider multiple number of destination points, then the implementation of k-SPT is modified.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a map database system for route navigation. The system contains database describing roads, interest points (such as a bus stop, store, etc), and route images. The system also includes an index for efficient processing of the shortest path query. Given a set of candidate destination points and a set of candidate transit points, the system generates an index for shortest path query dynamically. Then, a computer system gets a starting point, and the system makes the shortest path that includes one of the candidate destination points and one of the candidate transit points. The k-shortest path (k-SPT) method is used to evaluate the shortest path query. The original version of k-SPT does not consider multiple number of destination points, then the implementation of k-SPT is modified.",
"fno": "8985a219",
"keywords": [
"Navigation",
"Roads",
"Indexes",
"Color",
"Image Edge Detection",
"K Shortest Path K SPT",
"Map Database",
"Shortest Path Navigation",
"One Way Road",
"Shortest Path Query"
],
"authors": [
{
"affiliation": null,
"fullName": "Kunihiko Kaneko",
"givenName": "Kunihiko",
"surname": "Kaneko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shinya Honda",
"givenName": "Shinya",
"surname": "Honda",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iiai-aai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-07-01T00:00:00",
"pubType": "proceedings",
"pages": "219-223",
"year": "2016",
"issn": null,
"isbn": "978-1-4673-8985-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8985a215",
"articleId": "12OmNzcPA9f",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8985a224",
"articleId": "12OmNvrdI0Q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/smartcomp-workshops/2014/6447/0/07046669",
"title": "A dynamic path planning algorithm for multi-core navigation device",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp-workshops/2014/07046669/12OmNAoDilA",
"parentPublication": {
"id": "proceedings/smartcomp-workshops/2014/6447/0",
"title": "2014 International Conference on Smart Computing Workshops (SMARTCOMP Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccicc/2013/0783/0/06622277",
"title": "Flexible route planning for amusement parks navigation",
"doi": null,
"abstractUrl": "/proceedings-article/iccicc/2013/06622277/12OmNC1Y5nM",
"parentPublication": {
"id": "proceedings/iccicc/2013/0783/0",
"title": "2013 12th IEEE International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2010/7488/0/05663936",
"title": "VAN: Vehicle-assisted shortest-time path navigation",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2010/05663936/12OmNCcKQhW",
"parentPublication": {
"id": "proceedings/mass/2010/7488/0",
"title": "2010 IEEE 7th International Conference on Mobile Ad-Hoc and Sensor Systems (MASS 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2014/6239/0/6239a387",
"title": "Transport Route Planning for Mobile Tour Applications",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2014/6239a387/12OmNroij0Q",
"parentPublication": {
"id": "proceedings/cit/2014/6239/0",
"title": "2014 IEEE International Conference on Computer and Information Technology (CIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2014/4284/0/4284a222",
"title": "HAS: Hierarchical A-Star Algorithm for Big Map Navigation in Special Areas",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a222/12OmNvDI3XY",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcabes/2015/6593/0/6593a348",
"title": "An Improved Live-Wire Freed from the Restriction of the Direct Line Between Seed Points",
"doi": null,
"abstractUrl": "/proceedings-article/dcabes/2015/6593a348/12OmNyL0TCy",
"parentPublication": {
"id": "proceedings/dcabes/2015/6593/0",
"title": "2015 14th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ainaw/2008/3096/0/3096b551",
"title": "A Map Matching Algorithm for Car Navigation Systems that Predict User Destination",
"doi": null,
"abstractUrl": "/proceedings-article/ainaw/2008/3096b551/12OmNzZEAxc",
"parentPublication": {
"id": "proceedings/ainaw/2008/3096/0",
"title": "2008 22nd International Conference on Advanced Information Networking and Applications (AINA 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icie/2010/4080/2/05571356",
"title": "Implementation of Route Selection Function Based on Improved Floyd Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icie/2010/05571356/12OmNzlUKoM",
"parentPublication": {
"id": "proceedings/icie/2010/4080/2",
"title": "Information Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2014/04/06678359",
"title": "Towards Online Shortest Path Computation",
"doi": null,
"abstractUrl": "/journal/tk/2014/04/06678359/13rRUwIF6lx",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2022/5099/0/509900a588",
"title": "Efficient Navigation for Constrained Shortest Path with Adaptive Expansion Control",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2022/509900a588/1KpCKj3YygE",
"parentPublication": {
"id": "proceedings/icdm/2022/5099/0",
"title": "2022 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAndiq9",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"acronym": "pacificvis",
"groupId": "1001657",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs0kywM",
"doi": "10.1109/PacificVis.2013.6596123",
"title": "Constrained optimization for disoccluding geographic landmarks in 3D urban maps",
"normalizedTitle": "Constrained optimization for disoccluding geographic landmarks in 3D urban maps",
"abstract": "In composing hand-drawn 3D urban maps, the most common design problem is to avoid overlaps between geographic features such as roads and buildings by displacing them consistently over the map domain. Nonetheless, automating this map design process is still a challenging task because we have to maximally retain the 3D depth perception inherent in pairs of parallel lines embedded in the original layout of such geographic features. This paper presents a novel approach to disoccluding important geographic features when creating 3D urban maps for enhancing their visual readability. This is accomplished by formulating the design criteria as a constrained optimization problem based on the linear programming approach. Our mathematical formulation allows us to systematically eliminate occlusions of landmark roads and buildings, and further controls the degree of local 3D map deformation by devising an objective function to be minimized. Various design examples together with a user study are presented to demonstrate the robustness and feasibility of the proposed approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In composing hand-drawn 3D urban maps, the most common design problem is to avoid overlaps between geographic features such as roads and buildings by displacing them consistently over the map domain. Nonetheless, automating this map design process is still a challenging task because we have to maximally retain the 3D depth perception inherent in pairs of parallel lines embedded in the original layout of such geographic features. This paper presents a novel approach to disoccluding important geographic features when creating 3D urban maps for enhancing their visual readability. This is accomplished by formulating the design criteria as a constrained optimization problem based on the linear programming approach. Our mathematical formulation allows us to systematically eliminate occlusions of landmark roads and buildings, and further controls the degree of local 3D map deformation by devising an objective function to be minimized. Various design examples together with a user study are presented to demonstrate the robustness and feasibility of the proposed approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In composing hand-drawn 3D urban maps, the most common design problem is to avoid overlaps between geographic features such as roads and buildings by displacing them consistently over the map domain. Nonetheless, automating this map design process is still a challenging task because we have to maximally retain the 3D depth perception inherent in pairs of parallel lines embedded in the original layout of such geographic features. This paper presents a novel approach to disoccluding important geographic features when creating 3D urban maps for enhancing their visual readability. This is accomplished by formulating the design criteria as a constrained optimization problem based on the linear programming approach. Our mathematical formulation allows us to systematically eliminate occlusions of landmark roads and buildings, and further controls the degree of local 3D map deformation by devising an objective function to be minimized. Various design examples together with a user study are presented to demonstrate the robustness and feasibility of the proposed approach.",
"fno": "06596123",
"keywords": [
"Cartography",
"Computer Graphics",
"Geographic Information Systems",
"Linear Programming",
"Disoccluding Geographic Landmarks",
"Hand Drawn 3 D Urban Maps",
"Geographic Features",
"Roads",
"Buildings",
"Map Domain",
"Map Design Process",
"3 D Depth Perception",
"Parallel Lines",
"Visual Readability",
"Design Criteria",
"Constrained Optimization Problem",
"Linear Programming Approach",
"Mathematical Formulation",
"Occlusions",
"Local 3 D Map Deformation",
"Objective Function",
"Robustness",
"Buildings",
"Roads",
"Three Dimensional Displays",
"Optimization",
"Layout",
"Visualization",
"Linear Programming",
"I 3 8 Computer Graphics Applications"
],
"authors": [
{
"affiliation": "The University of Tokyo, Japan",
"fullName": "Daichi Hirono",
"givenName": "Daichi",
"surname": "Hirono",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo, Japan",
"fullName": "Hsiang-Yun Wu",
"givenName": "Hsiang-Yun",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo, Japan",
"fullName": "Masatoshi Arikawa",
"givenName": "Masatoshi",
"surname": "Arikawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo, Japan",
"fullName": "Shigeo Takahashi",
"givenName": "Shigeo",
"surname": "Takahashi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pacificvis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-02-01T00:00:00",
"pubType": "proceedings",
"pages": "17-24",
"year": "2013",
"issn": "2165-8765",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06596122",
"articleId": "12OmNzBwGnC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06596124",
"articleId": "12OmNwEJ0M4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/waina/2015/1775/0/1775a721",
"title": "Road-Oriented Geographic Routing Protocol for Urban Vehicular Ad Hoc Networks",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2015/1775a721/12OmNAlNiO3",
"parentPublication": {
"id": "proceedings/waina/2015/1775/0",
"title": "2015 IEEE 29th International Conference on Advanced Information Networking and Applications Workshops (WAINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2018/4133/0/413301a257",
"title": "Accurate Fuel Estimates Using CAN Bus Data and 3D Maps",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2018/413301a257/12OmNBOUxms",
"parentPublication": {
"id": "proceedings/mdm/2018/4133/0",
"title": "2018 19th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icii/2001/7010/3/00983098",
"title": "Knowledge based pipeline network peeling off recognition method of maps",
"doi": null,
"abstractUrl": "/proceedings-article/icii/2001/00983098/12OmNBQkx41",
"parentPublication": {
"id": "proceedings/icii/2001/7010/3",
"title": "2001 International Conferences on Info-tech and Info-net. Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/1995/7128/1/71280253",
"title": "Conversion of high level information from scanned maps into geographic information systems",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/1995/71280253/12OmNqBbHRG",
"parentPublication": {
"id": "proceedings/icdar/1995/7128/1",
"title": "Proceedings of 3rd International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118d978",
"title": "Geometric Urban Geo-localization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118d978/12OmNym2bWG",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6270/2/00576936",
"title": "Automated acquisition of geographic information from scanned maps for GIS using frames and semantic networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576936/12OmNzxgHum",
"parentPublication": {
"id": "proceedings/icpr/1994/6270/2",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07530932",
"title": "Generating Multi-Destination Maps",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07530932/13rRUxASubC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icics/2018/6483/0/648300a295",
"title": "Integrating Hand Gesture Modelling and Virtual Reality for Urban Planning",
"doi": null,
"abstractUrl": "/proceedings-article/icics/2018/648300a295/146z4GiPEHK",
"parentPublication": {
"id": "proceedings/icics/2018/6483/0",
"title": "2018 International Conference on Intelligent Circuits and Systems (ICICS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a109",
"title": "Rapid Construction Algorithm of 3D Urban Road Network from Raster Maps",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a109/1ap5yDN9kw8",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2020/8432/0/843200a136",
"title": "Procedural Generation of Favela Layouts on Arbitrary Terrains",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2020/843200a136/1pQILndJA0E",
"parentPublication": {
"id": "proceedings/sbgames/2020/8432/0",
"title": "2020 19th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyQYteW",
"title": "2016 IEEE 41st Conference on Local Computer Networks (LCN)",
"acronym": "lcn",
"groupId": "1000419",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx6PiyX",
"doi": "10.1109/LCN.2016.62",
"title": "Maximum Likelihood Topology Maps for Wireless Sensor Networks Using an Automated Robot",
"normalizedTitle": "Maximum Likelihood Topology Maps for Wireless Sensor Networks Using an Automated Robot",
"abstract": "Topology maps represent the layout arrangement of nodes while maintaining the connectivity. As it is extracted using connectivity information only, it does not accurately represent the physical layout such as physical voids, shape, and relative distances among physical positions of sensor nodes. A novel concept Maximum Likelihood-Topology Maps for Wireless Sensor Networks is presented. As it is based on a packet reception probability function, which is sensitive to the distance, it represents the physical layout more accurately. In this paper, we use a binary matrix recorded by a mobile robot representing the reception of packets from sensor nodes by the mobile robot at different locations along the robots trajectory. Maximum likelihood topology coordinates are then extracted from the binary matrix by using a packet receiving probability function. Also, the robot trajectory is automated to avoid the obstacles and cover the entire network within least possible amount of time. The result shows that our algorithm generates topology maps for various network shapes under different environmental conditions accurately, and that it outperforms the existing algorithms by representing the physical layout of the network more accurately.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Topology maps represent the layout arrangement of nodes while maintaining the connectivity. As it is extracted using connectivity information only, it does not accurately represent the physical layout such as physical voids, shape, and relative distances among physical positions of sensor nodes. A novel concept Maximum Likelihood-Topology Maps for Wireless Sensor Networks is presented. As it is based on a packet reception probability function, which is sensitive to the distance, it represents the physical layout more accurately. In this paper, we use a binary matrix recorded by a mobile robot representing the reception of packets from sensor nodes by the mobile robot at different locations along the robots trajectory. Maximum likelihood topology coordinates are then extracted from the binary matrix by using a packet receiving probability function. Also, the robot trajectory is automated to avoid the obstacles and cover the entire network within least possible amount of time. The result shows that our algorithm generates topology maps for various network shapes under different environmental conditions accurately, and that it outperforms the existing algorithms by representing the physical layout of the network more accurately.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Topology maps represent the layout arrangement of nodes while maintaining the connectivity. As it is extracted using connectivity information only, it does not accurately represent the physical layout such as physical voids, shape, and relative distances among physical positions of sensor nodes. A novel concept Maximum Likelihood-Topology Maps for Wireless Sensor Networks is presented. As it is based on a packet reception probability function, which is sensitive to the distance, it represents the physical layout more accurately. In this paper, we use a binary matrix recorded by a mobile robot representing the reception of packets from sensor nodes by the mobile robot at different locations along the robots trajectory. Maximum likelihood topology coordinates are then extracted from the binary matrix by using a packet receiving probability function. Also, the robot trajectory is automated to avoid the obstacles and cover the entire network within least possible amount of time. The result shows that our algorithm generates topology maps for various network shapes under different environmental conditions accurately, and that it outperforms the existing algorithms by representing the physical layout of the network more accurately.",
"fno": "2054a339",
"keywords": [
"Robot Sensing Systems",
"Topology",
"Network Topology",
"Robot Kinematics",
"Layout",
"Trajectory",
"Localization",
"Packet Receiving Probability",
"Signal Propagation",
"Topological Map",
"Wireless Sensor Network"
],
"authors": [
{
"affiliation": null,
"fullName": "Ashanie Gunathillake",
"givenName": "Ashanie",
"surname": "Gunathillake",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Andrey V. Savkin",
"givenName": "Andrey V.",
"surname": "Savkin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Anura P. Jayasumana",
"givenName": "Anura P.",
"surname": "Jayasumana",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "lcn",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-11-01T00:00:00",
"pubType": "proceedings",
"pages": "339-347",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-2054-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2054a333",
"articleId": "12OmNvjgWPH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2054a348",
"articleId": "12OmNyprns1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/lcn/2017/6523/0/6523a453",
"title": "Topology Maps for 3D Millimeter Wave Sensor Networks with Directional Antennas",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2017/6523a453/12OmNAolGST",
"parentPublication": {
"id": "proceedings/lcn/2017/6523/0",
"title": "2017 IEEE 42nd Conference on Local Computer Networks (LCN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icraect/2017/6701/0/6701a043",
"title": "Obstacle Avoiding Robot: A Lego EV3 Robot",
"doi": null,
"abstractUrl": "/proceedings-article/icraect/2017/6701a043/12OmNBC8AyV",
"parentPublication": {
"id": "proceedings/icraect/2017/6701/0",
"title": "2017 International Conference on Recent Advances in Electronics and Communication Technology (ICRAECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciicii/2015/8312/0/8312a116",
"title": "An AOP-Based Robot Behaviors Safety Checking Method",
"doi": null,
"abstractUrl": "/proceedings-article/iciicii/2015/8312a116/12OmNBTawqd",
"parentPublication": {
"id": "proceedings/iciicii/2015/8312/0",
"title": "2015 International Conference on Industrial Informatics - Computing Technology, Intelligent Technology, Industrial Information Integration (ICIICII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2012/0430/0/06386466",
"title": "A robot platform for unmanned weeding in a paddy field using sensor fusion",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386466/12OmNrIJqus",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a978",
"title": "Building Optimal Radio-Frequency Signal Maps",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a978/12OmNviZlLF",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2012/0430/0/06386475",
"title": "Multi-robot exploration with communication requirement to a moving base station",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386475/12OmNx57HL1",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2017/3337/0/08171613",
"title": "Resilient hexapod robot",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2017/08171613/12OmNxcdG3L",
"parentPublication": {
"id": "proceedings/icat/2017/3337/0",
"title": "2017 XXVI International Conference on Information, Communication and Automation Technologies (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lcnw/2016/2347/0/2347a173",
"title": "Decentralized Time-Based Target Searching Algorithm Using Sensor Network Topology Maps",
"doi": null,
"abstractUrl": "/proceedings-article/lcnw/2016/2347a173/12OmNxymoaR",
"parentPublication": {
"id": "proceedings/lcnw/2016/2347/0",
"title": "2016 IEEE 41st Conference on Local Computer Networks Workshops (LCN Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2017/4879/0/4879a074",
"title": "A Mirror World-Based Robot Control System",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2017/4879a074/12OmNyQpgN2",
"parentPublication": {
"id": "proceedings/icise/2017/4879/0",
"title": "2017 Second International Conference on Information Systems Engineering (ICISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2017/6724/0/07926524",
"title": "Multi-robot Planning for Non-overlapping Operator Attention Allocation",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2017/07926524/12OmNzwZ6p9",
"parentPublication": {
"id": "proceedings/irc/2017/6724/0",
"title": "2017 First IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwekjuM",
"title": "9th International Conference on Pattern Recognition",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "1988",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxdm4Ax",
"doi": "10.1109/ICPR.1988.28282",
"title": "Representing a global map for a mobile robot with relational local maps from sensory data",
"normalizedTitle": "Representing a global map for a mobile robot with relational local maps from sensory data",
"abstract": "A method is proposed for representing a global map for a mobile robot by using the descriptions of local maps and their relation. Sensor maps viewed at different locations close to each other are transferred into a local map represented in the object-centered coordinate system. First, the 3-D information of the edges on the floor is obtained at each sensor map (a view) by assuming the camera model and the flatness of the floor. A reliable feature is selected as a reference in the local map on which other edges are mapped. During the motion of the robot, the local map is updated by a motion stereo method until the current reference point disappears from a view. Farther edges must be represented in other local maps when the robot approaches them, since they cannot be located as precisely as closer edges can. Finally, the relationship between local maps in the context of the global map is described. The method has been tested on an indoor scene, and the experimental results are shown.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "A method is proposed for representing a global map for a mobile robot by using the descriptions of local maps and their relation. Sensor maps viewed at different locations close to each other are transferred into a local map represented in the object-centered coordinate system. First, the 3-D information of the edges on the floor is obtained at each sensor map (a view) by assuming the camera model and the flatness of the floor. A reliable feature is selected as a reference in the local map on which other edges are mapped. During the motion of the robot, the local map is updated by a motion stereo method until the current reference point disappears from a view. Farther edges must be represented in other local maps when the robot approaches them, since they cannot be located as precisely as closer edges can. Finally, the relationship between local maps in the context of the global map is described. The method has been tested on an indoor scene, and the experimental results are shown.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A method is proposed for representing a global map for a mobile robot by using the descriptions of local maps and their relation. Sensor maps viewed at different locations close to each other are transferred into a local map represented in the object-centered coordinate system. First, the 3-D information of the edges on the floor is obtained at each sensor map (a view) by assuming the camera model and the flatness of the floor. A reliable feature is selected as a reference in the local map on which other edges are mapped. During the motion of the robot, the local map is updated by a motion stereo method until the current reference point disappears from a view. Farther edges must be represented in other local maps when the robot approaches them, since they cannot be located as precisely as closer edges can. Finally, the relationship between local maps in the context of the global map is described. The method has been tested on an indoor scene, and the experimental results are shown.",
"fno": "00028282",
"keywords": [
"Computer Vision",
"Computerised Navigation",
"Computerised Pattern Recognition",
"Mobile Robots",
"Global Map Representation",
"Edge Detection",
"Robot Vision",
"Computerised Navigation",
"Computerised Pattern Recognition",
"Computer Vision",
"Mobile Robot",
"Relational Local Maps",
"Sensory Data",
"Object Centered Coordinate System",
"Camera Model",
"Motion Stereo",
"Mobile Robots",
"Robot Kinematics",
"Layout",
"Sonar Navigation",
"Robot Sensing Systems",
"Intelligent Sensors",
"Roads",
"Control Engineering",
"Sensor Systems",
"Robot Vision Systems"
],
"authors": [
{
"affiliation": "Dept. of Control Eng., Osaka Univ., Japan",
"fullName": "M. Asada",
"givenName": "M.",
"surname": "Asada",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Y. Fukui",
"givenName": "Y.",
"surname": "Fukui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "S. Tsuji",
"givenName": "S.",
"surname": "Tsuji",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1988-01-01T00:00:00",
"pubType": "proceedings",
"pages": "520,521,522,523,524",
"year": "1988",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00028281",
"articleId": "12OmNyRPgrL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00028283",
"articleId": "12OmNyRPgDo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/hpcc/2016/4297/0/07828526",
"title": "Registration of Low Cost Maps within Large Scale MMS Maps",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc/2016/07828526/12OmNApLGsF",
"parentPublication": {
"id": "proceedings/hpcc/2016/4297/0",
"title": "2016 IEEE 18th International Conference on High-Performance Computing and Communications, IEEE 14th International Conference on Smart City, and IEEE 2nd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2013/5159/0/06726394",
"title": "Application of Electronic Compass and Vision-Based Camera in Robot Navigation and Map Building",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2013/06726394/12OmNs4S8DQ",
"parentPublication": {
"id": "proceedings/msn/2013/5159/0",
"title": "2013 Ninth International Conference on Mobile Ad-hoc and Sensor Networks (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a978",
"title": "Building Optimal Radio-Frequency Signal Maps",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a978/12OmNviZlLF",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fitme/2009/5339/0/05380968",
"title": "Integrating Line Segment Based Maps in Multi-Robots Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/fitme/2009/05380968/12OmNxAlA2P",
"parentPublication": {
"id": "proceedings/fitme/2009/5339/0",
"title": "2009 Second International Conference on Future Information Technology and Management Engineering (FITME 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131654",
"title": "Real-time vision-based robot localization",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131654/12OmNy5R3Eg",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isic/1988/2012/0/00065419",
"title": "Locating a mobile robot using local observations and a global satellite map",
"doi": null,
"abstractUrl": "/proceedings-article/isic/1988/00065419/12OmNyQGS60",
"parentPublication": {
"id": "proceedings/isic/1988/2012/0",
"title": "Proceedings 1988 IEEE International Symposium on Intelligent Control",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012177",
"title": "Building a 3D world model for mobile robot from sensory data",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012177/12OmNyprnpu",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761036",
"title": "Merging maps of multiple robots",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761036/12OmNz61d9b",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1990/2057/0/00139591",
"title": "Omni-directional stereo for making global map",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1990/00139591/12OmNzdoN8o",
"parentPublication": {
"id": "proceedings/iccv/1990/2057/0",
"title": "Proceedings Third International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07530932",
"title": "Generating Multi-Destination Maps",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07530932/13rRUxASubC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwDAC4K",
"title": "2013 29th IEEE International Conference on Data Engineering (ICDE 2013)",
"acronym": "icde",
"groupId": "1000178",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyKrHkH",
"doi": "10.1109/ICDE.2013.6544830",
"title": "Destination prediction by sub-trajectory synthesis and privacy protection against such prediction",
"normalizedTitle": "Destination prediction by sub-trajectory synthesis and privacy protection against such prediction",
"abstract": "Destination prediction is an essential task for many emerging location based applications such as recommending sightseeing places and targeted advertising based on destination. A common approach to destination prediction is to derive the probability of a location being the destination based on historical trajectories. However, existing techniques using this approach suffer from the “data sparsity problem”, i.e., the available historical trajectories is far from being able to cover all possible trajectories. This problem considerably limits the number of query trajectories that can obtain predicted destinations. We propose a novel method named Sub-Trajectory Synthesis (SubSyn) algorithm to address the data sparsity problem. SubSyn algorithm first decomposes historical trajectories into sub-trajectories comprising two neighbouring locations, and then connects the sub-trajectories into “synthesised” trajectories. The number of query trajectories that can have predicted destinations is exponentially increased by this means. Experiments based on real datasets show that SubSyn algorithm can predict destinations for up to ten times more query trajectories than a baseline algorithm while the SubSyn prediction algorithm runs over two orders of magnitude faster than the baseline algorithm. In this paper, we also consider the privacy protection issue in case an adversary uses SubSyn algorithm to derive sensitive location information of users. We propose an efficient algorithm to select a minimum number of locations a user has to hide on her trajectory in order to avoid privacy leak. Experiments also validate the high efficiency of the privacy protection algorithm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Destination prediction is an essential task for many emerging location based applications such as recommending sightseeing places and targeted advertising based on destination. A common approach to destination prediction is to derive the probability of a location being the destination based on historical trajectories. However, existing techniques using this approach suffer from the “data sparsity problem”, i.e., the available historical trajectories is far from being able to cover all possible trajectories. This problem considerably limits the number of query trajectories that can obtain predicted destinations. We propose a novel method named Sub-Trajectory Synthesis (SubSyn) algorithm to address the data sparsity problem. SubSyn algorithm first decomposes historical trajectories into sub-trajectories comprising two neighbouring locations, and then connects the sub-trajectories into “synthesised” trajectories. The number of query trajectories that can have predicted destinations is exponentially increased by this means. Experiments based on real datasets show that SubSyn algorithm can predict destinations for up to ten times more query trajectories than a baseline algorithm while the SubSyn prediction algorithm runs over two orders of magnitude faster than the baseline algorithm. In this paper, we also consider the privacy protection issue in case an adversary uses SubSyn algorithm to derive sensitive location information of users. We propose an efficient algorithm to select a minimum number of locations a user has to hide on her trajectory in order to avoid privacy leak. Experiments also validate the high efficiency of the privacy protection algorithm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Destination prediction is an essential task for many emerging location based applications such as recommending sightseeing places and targeted advertising based on destination. A common approach to destination prediction is to derive the probability of a location being the destination based on historical trajectories. However, existing techniques using this approach suffer from the “data sparsity problem”, i.e., the available historical trajectories is far from being able to cover all possible trajectories. This problem considerably limits the number of query trajectories that can obtain predicted destinations. We propose a novel method named Sub-Trajectory Synthesis (SubSyn) algorithm to address the data sparsity problem. SubSyn algorithm first decomposes historical trajectories into sub-trajectories comprising two neighbouring locations, and then connects the sub-trajectories into “synthesised” trajectories. The number of query trajectories that can have predicted destinations is exponentially increased by this means. Experiments based on real datasets show that SubSyn algorithm can predict destinations for up to ten times more query trajectories than a baseline algorithm while the SubSyn prediction algorithm runs over two orders of magnitude faster than the baseline algorithm. In this paper, we also consider the privacy protection issue in case an adversary uses SubSyn algorithm to derive sensitive location information of users. We propose an efficient algorithm to select a minimum number of locations a user has to hide on her trajectory in order to avoid privacy leak. Experiments also validate the high efficiency of the privacy protection algorithm.",
"fno": "06544830",
"keywords": [
"Trajectory",
"Prediction Algorithms",
"Privacy",
"Markov Processes",
"Computational Modeling",
"Bayes Methods",
"Roads"
],
"authors": [
{
"affiliation": "Univ. of Melbourne, Melbourne, VIC, Australia",
"fullName": "A. Y. Xue",
"givenName": "A. Y.",
"surname": "Xue",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Melbourne, Melbourne, VIC, Australia",
"fullName": "Rui Zhang",
"givenName": null,
"surname": "Rui Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Res. Asia, Beijing, China",
"fullName": "Yu Zheng",
"givenName": null,
"surname": "Yu Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Res. Asia, Beijing, China",
"fullName": "Xing Xie",
"givenName": null,
"surname": "Xing Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Melbourne, Melbourne, VIC, Australia",
"fullName": "Jin Huang",
"givenName": null,
"surname": "Jin Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Melbourne, Melbourne, VIC, Australia",
"fullName": "Zhenghua Xu",
"givenName": null,
"surname": "Zhenghua Xu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icde",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-04-01T00:00:00",
"pubType": "proceedings",
"pages": "254-265",
"year": "2013",
"issn": "1063-6382",
"isbn": "978-1-4673-4909-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06544829",
"articleId": "12OmNzSQdo9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06544831",
"articleId": "12OmNz5JBQe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/smartcomp-workshops/2014/6447/0/07046673",
"title": "DCTP: Data collecting based on trajectory prediction in Smart Environment",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp-workshops/2014/07046673/12OmNBSjJ76",
"parentPublication": {
"id": "proceedings/smartcomp-workshops/2014/6447/0",
"title": "2014 International Conference on Smart Computing Workshops (SMARTCOMP Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2018/4133/0/413301a135",
"title": "Origin-Destination Trajectory Diversity Analysis: Efficient Top-k Diversified Search",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2018/413301a135/12OmNrAMF5c",
"parentPublication": {
"id": "proceedings/mdm/2018/4133/0",
"title": "2018 19th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2015/9926/0/07364113",
"title": "Taxi trip time prediction using similar trips and road network data",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07364113/12OmNrMZpzH",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/nt/2018/03/08333732",
"title": "MobiT: Distributed and Congestion-Resilient Trajectory-Based Routing for Vehicular Delay Tolerant Networks",
"doi": null,
"abstractUrl": "/journal/nt/2018/03/08333732/13rRUEgs2RS",
"parentPublication": {
"id": "trans/nt",
"title": "IEEE/ACM Transactions on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07530932",
"title": "Generating Multi-Destination Maps",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07530932/13rRUxASubC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2020/02/08550645",
"title": "An Efficient Destination Prediction Approach Based on Future Trajectory Prediction and Transition Matrix Optimization",
"doi": null,
"abstractUrl": "/journal/tk/2020/02/08550645/17D45VTRowi",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-iucc/2017/3790/0/379001b261",
"title": "A Trajectory Prediction Method with Sparsity Data",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-iucc/2017/379001b261/17D45XeKgvB",
"parentPublication": {
"id": "proceedings/ispa-iucc/2017/3790/0",
"title": "2017 IEEE International Symposium on Parallel and Distributed Processing with Applications and 2017 IEEE International Conference on Ubiquitous Computing and Communications (ISPA/IUCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671813",
"title": "Modelling of Destinations for Data-driven Pedestrian Trajectory Prediction in Public Buildings",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671813/1A8hldZKjKg",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a174",
"title": "Visual Analytics of Taxi Trajectory Data via Topical Sub-trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a174/1cMF7meccAo",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2021/03/08812906",
"title": "Forecasting Gathering Events through Trajectory Destination Prediction: A Dynamic Hybrid Model",
"doi": null,
"abstractUrl": "/journal/tk/2021/03/08812906/1cPWD1Dgs8g",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzy7uNY",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"acronym": "icmtma",
"groupId": "1002837",
"volume": "3",
"displayVolume": "3",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqBtiZG",
"doi": "10.1109/ICMTMA.2010.787",
"title": "Simulation Study on the Characteristics of Carbon-Fiber-Reinforced Plastics in Electromagnetic Tomography Nondestructive Evaluation Systems",
"normalizedTitle": "Simulation Study on the Characteristics of Carbon-Fiber-Reinforced Plastics in Electromagnetic Tomography Nondestructive Evaluation Systems",
"abstract": "Simulations of carbon-fiber-reinforced plastics (CFRP) using the finite element method in electromagnetic tomography nondestructive evaluation systems are presented in this paper. Ansoft Maxwell 3D models describing the interaction of the sensors with both unidirectional and cross-woven plate samples are built and analyzed to show the CFRP orientations. A circular sensors array is designed and 400 test holes are punched through the CFRP plate respectively in order to acquire the sensitivity matrix data based on which the image of the plate can be reconstructed. Calculation shows that the proposed method can roughly sense the damaged area.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Simulations of carbon-fiber-reinforced plastics (CFRP) using the finite element method in electromagnetic tomography nondestructive evaluation systems are presented in this paper. Ansoft Maxwell 3D models describing the interaction of the sensors with both unidirectional and cross-woven plate samples are built and analyzed to show the CFRP orientations. A circular sensors array is designed and 400 test holes are punched through the CFRP plate respectively in order to acquire the sensitivity matrix data based on which the image of the plate can be reconstructed. Calculation shows that the proposed method can roughly sense the damaged area.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Simulations of carbon-fiber-reinforced plastics (CFRP) using the finite element method in electromagnetic tomography nondestructive evaluation systems are presented in this paper. Ansoft Maxwell 3D models describing the interaction of the sensors with both unidirectional and cross-woven plate samples are built and analyzed to show the CFRP orientations. A circular sensors array is designed and 400 test holes are punched through the CFRP plate respectively in order to acquire the sensitivity matrix data based on which the image of the plate can be reconstructed. Calculation shows that the proposed method can roughly sense the damaged area.",
"fno": "3962e382",
"keywords": [
"Carbon Fiber Reinforced Plastics CFRP",
"Finite Element",
"Electromagnetic Tomography EMT",
"Nondestructive Evaluation NDE",
"Sensitivity Matrix"
],
"authors": [
{
"affiliation": null,
"fullName": "Ze Liu",
"givenName": "Ze",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yu Xu",
"givenName": "Yu",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaofei Zhang",
"givenName": "Xiaofei",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yali Pei",
"givenName": "Yali",
"surname": "Pei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yiping Cheng",
"givenName": "Yiping",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wuliang Yin",
"givenName": "Wuliang",
"surname": "Yin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmtma",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-03-01T00:00:00",
"pubType": "proceedings",
"pages": "382-385",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-3962-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3962e378",
"articleId": "12OmNvT2oQh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3962e386",
"articleId": "12OmNApu5Ji",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ams/2012/4730/0/4730a231",
"title": "FDTD Method for the Electromagnetic Transient Behavior of Carbon Fiber Reinforced Plastic",
"doi": null,
"abstractUrl": "/proceedings-article/ams/2012/4730a231/12OmNBqMDEJ",
"parentPublication": {
"id": "proceedings/ams/2012/4730/0",
"title": "Asia International Conference on Modelling & Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecodim/2003/8590/0/01322717",
"title": "Effect of lightened automobiles on the environmental problem due to Asian motorization",
"doi": null,
"abstractUrl": "/proceedings-article/ecodim/2003/01322717/12OmNrAMENU",
"parentPublication": {
"id": "proceedings/ecodim/2003/8590/0",
"title": "2003. 3rd International Symposium on Environmentally Conscious Design and Inverse Manufacturing - EcoDesign'03",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2010/4212/1/4212a709",
"title": "Experimental Study on Reinforced Concrete Column Strengthened by CFRP under Bidirectional Eccentric Load",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2010/4212a709/12OmNvStcvG",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiea/2020/8288/0/828800a771",
"title": "Finite Element Analysis of the Thermal Conductivity and the Specific Heat of Carbon Fiber Reinforced Plastic (CFRP) Composites",
"doi": null,
"abstractUrl": "/proceedings-article/aiea/2020/828800a771/1nTul2L8zjG",
"parentPublication": {
"id": "proceedings/aiea/2020/8288/0",
"title": "2020 International Conference on Artificial Intelligence and Electromechanical Automation (AIEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1I8wxXsB9yE",
"title": "2022 8th International Conference on Mechanical Engineering and Automation Science (ICMEAS)",
"acronym": "icmeas",
"groupId": "9939174",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1I8wEqwX9ks",
"doi": "10.1109/ICMEAS57305.2022.00041",
"title": "Intralaminar Crack Detection for Carbon Fiber Reinforced Polymers Based on Laser-Line Thermography",
"normalizedTitle": "Intralaminar Crack Detection for Carbon Fiber Reinforced Polymers Based on Laser-Line Thermography",
"abstract": "As Carbon fiber reinforced polymers (CFRP) are widely used in aerospace and marine engineering and other important fields, finding more efficient and rapid non-destructive testing (NDT) techniques to ensure the safety and reliability of CFRP composite materials has been the focus of research in the field of NDT. However, the current NDT techniques for carbon fiber reinforced polymer (CFRP) mainly detect defects that are parallel to the material surface, and there are few studies on defects that are perpendicular to the material surface, such as matrix cracking. In this paper, the laser-line thermography is compared with the lock-in thermography in time domain and spatial domain, and the effect of the main experimental parameters of laser thermography on the detection results is investigated. The results show that laser-line thermography can identify the characteristic size and geometric position of CFRP defects more accurately and significantly improve the imaging quality of defects. The analysis of the simulation results can conclude that the position corresponding to the temperature change peak under laser-line thermography is basically consistent with the boundary position of the defect, which can be used as a characteristic parameter for defect identification. When using laser-line thermography for material inspection, using a higher laser power and a shorter laser pulse duration can improve the imaging quality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As Carbon fiber reinforced polymers (CFRP) are widely used in aerospace and marine engineering and other important fields, finding more efficient and rapid non-destructive testing (NDT) techniques to ensure the safety and reliability of CFRP composite materials has been the focus of research in the field of NDT. However, the current NDT techniques for carbon fiber reinforced polymer (CFRP) mainly detect defects that are parallel to the material surface, and there are few studies on defects that are perpendicular to the material surface, such as matrix cracking. In this paper, the laser-line thermography is compared with the lock-in thermography in time domain and spatial domain, and the effect of the main experimental parameters of laser thermography on the detection results is investigated. The results show that laser-line thermography can identify the characteristic size and geometric position of CFRP defects more accurately and significantly improve the imaging quality of defects. The analysis of the simulation results can conclude that the position corresponding to the temperature change peak under laser-line thermography is basically consistent with the boundary position of the defect, which can be used as a characteristic parameter for defect identification. When using laser-line thermography for material inspection, using a higher laser power and a shorter laser pulse duration can improve the imaging quality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As Carbon fiber reinforced polymers (CFRP) are widely used in aerospace and marine engineering and other important fields, finding more efficient and rapid non-destructive testing (NDT) techniques to ensure the safety and reliability of CFRP composite materials has been the focus of research in the field of NDT. However, the current NDT techniques for carbon fiber reinforced polymer (CFRP) mainly detect defects that are parallel to the material surface, and there are few studies on defects that are perpendicular to the material surface, such as matrix cracking. In this paper, the laser-line thermography is compared with the lock-in thermography in time domain and spatial domain, and the effect of the main experimental parameters of laser thermography on the detection results is investigated. The results show that laser-line thermography can identify the characteristic size and geometric position of CFRP defects more accurately and significantly improve the imaging quality of defects. The analysis of the simulation results can conclude that the position corresponding to the temperature change peak under laser-line thermography is basically consistent with the boundary position of the defect, which can be used as a characteristic parameter for defect identification. When using laser-line thermography for material inspection, using a higher laser power and a shorter laser pulse duration can improve the imaging quality.",
"fno": "630500a178",
"keywords": [
"Carbon Fibre Reinforced Composites",
"Carbon Fibre Reinforced Plastics",
"Composite Materials",
"Crack Detection",
"Cracks",
"Eddy Current Testing",
"Flaw Detection",
"Infrared Imaging",
"Inspection",
"Nondestructive Testing",
"Surface Cracks",
"Ultrasonic Materials Testing",
"Intralaminar Crack Detection",
"Carbon Fiber Reinforced Polymers",
"Laser Line Thermography",
"Nondestructive Testing Techniques",
"CFRP Composite Materials",
"Current NDT Techniques",
"Material Surface",
"Laser Thermography",
"CFRP Defects",
"Temperature Distribution",
"Laser Theory",
"Simulation",
"Power Lasers",
"Imaging",
"Plastics",
"Surface Cracks",
"CFRP",
"Laser Line Thermography",
"Non Destructive Testing",
"Finite Element Simulation"
],
"authors": [
{
"affiliation": "Xi’an Jiaotong University,State Key Laboratory For Manufacturing Systems Engineering,Xi’an,China",
"fullName": "Chunman Liu",
"givenName": "Chunman",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong University,International Joint Research Laboratory for Micro/Nano Manufacturing and Measurement Technologies,Xi’an,China",
"fullName": "Bing Li",
"givenName": "Bing",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong University,State Key Laboratory For Manufacturing Systems Engineering,Xi’an,China",
"fullName": "Fei Gao",
"givenName": "Fei",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong University,State Key Laboratory For Manufacturing Systems Engineering,Xi’an,China",
"fullName": "Lei Chen",
"givenName": "Lei",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong University,State Key Laboratory For Manufacturing Systems Engineering,Xi’an,China",
"fullName": "Feng Qin",
"givenName": "Feng",
"surname": "Qin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmeas",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "178-184",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6305-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "630500a171",
"articleId": "1I8wEcfCgDe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "630500a185",
"articleId": "1I8wAk0rs3K",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iiki/2016/5952/0/5952a374",
"title": "Infrared Thermography and Its Applications in Aircraft Non-destructive Testing",
"doi": null,
"abstractUrl": "/proceedings-article/iiki/2016/5952a374/12OmNAY79eK",
"parentPublication": {
"id": "proceedings/iiki/2016/5952/0",
"title": "2016 International Conference on Identification, Information and Knowledge in the Internet of Things (IIKI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccea/2010/6079/1/05445808",
"title": "Computer Simulation for Laser Welding of Thermoplastic Polymers",
"doi": null,
"abstractUrl": "/proceedings-article/iccea/2010/05445808/12OmNBPtJGE",
"parentPublication": {
"id": "proceedings/iccea/2010/6079/1",
"title": "2010 Second International Conference on Computer Engineering and Applications (ICCEA 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/3/3962e382",
"title": "Simulation Study on the Characteristics of Carbon-Fiber-Reinforced Plastics in Electromagnetic Tomography Nondestructive Evaluation Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962e382/12OmNqBtiZG",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/3",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a342",
"title": "Crack Detection in \"As-Cast\" Steel Using Laser Triangulation and Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a342/12OmNyKrHjL",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2018/8481/0/848100a196",
"title": "Simulation and Experimental Study of Electrolyte Jet-Assisted Laser Micromachining and Punching",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2018/848100a196/17D45XlyDvh",
"parentPublication": {
"id": "proceedings/icmcce/2018/8481/0",
"title": "2018 3rd International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-&-icivpr/2018/5163/0/08641076",
"title": "Crack Propagation Analysis in Ductile Cast Iron by Acoustic Emission Technique",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-&-icivpr/2018/08641076/17PYEkVjiKy",
"parentPublication": {
"id": "proceedings/iciev-&-icivpr/2018/5163/0",
"title": "2018 Joint 7th International Conference on Informatics, Electronics & Vision (ICIEV) and 2018 2nd International Conference on Imaging, Vision & Pattern Recognition (icIVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisai/2021/0692/0/069200a178",
"title": "Finite element modelling and failure behaviour analysis of Carbon Fibre-reinforced Plastic thin-walled tube with cutouts under quasi-static loading",
"doi": null,
"abstractUrl": "/proceedings-article/cisai/2021/069200a178/1BmOgEJWUFO",
"parentPublication": {
"id": "proceedings/cisai/2021/0692/0",
"title": "2021 International Conference on Computer Information Science and Artificial Intelligence (CISAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crc/2019/4620/0/462000a196",
"title": "Remote Crack Measurement Using Android Camera with Laser-Positioning Technique",
"doi": null,
"abstractUrl": "/proceedings-article/crc/2019/462000a196/1iTuKuNHHws",
"parentPublication": {
"id": "proceedings/crc/2019/4620/0",
"title": "2019 4th International Conference on Control, Robotics and Cybernetics (CRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2020/8145/0/09122119",
"title": "Analysis and control of laser weld crack",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2020/09122119/1kRSFLjSWC4",
"parentPublication": {
"id": "proceedings/icedme/2020/8145/0",
"title": "2020 3rd International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2020/8145/0/09122060",
"title": "The study of temperature-dependent degradation of optical output on 808 nm GaAs-Based High-Power Laser Diode Bars",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2020/09122060/1kRSGq89i5a",
"parentPublication": {
"id": "proceedings/icedme/2020/8145/0",
"title": "2020 3rd International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1nTufXLuY2Q",
"title": "2020 International Conference on Artificial Intelligence and Electromechanical Automation (AIEA)",
"acronym": "aiea",
"groupId": "1838245",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1nTul2L8zjG",
"doi": "10.1109/AIEA51086.2020.00169",
"title": "Finite Element Analysis of the Thermal Conductivity and the Specific Heat of Carbon Fiber Reinforced Plastic (CFRP) Composites",
"normalizedTitle": "Finite Element Analysis of the Thermal Conductivity and the Specific Heat of Carbon Fiber Reinforced Plastic (CFRP) Composites",
"abstract": "The thermal conductivities of CFRP composites in the direction parallel to the fiber and vertical to the fiber and the specific heat of it were investigated by the methods of theoretical models and finite element analysis (FEA). The thermal conductivities, the specific heats and the volumes of both the carbon fiber and the epoxy resin, as constitute materials, were comprehensively considered in adopting the methods of theoretical models and FEA. In this paper, theoretical model and finite element analysis method are used to study the thermal conductivity and specific heat of CFRP composite, which shows that they have good consistency, and also shows the accuracy and rationality of the finite element analysis results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The thermal conductivities of CFRP composites in the direction parallel to the fiber and vertical to the fiber and the specific heat of it were investigated by the methods of theoretical models and finite element analysis (FEA). The thermal conductivities, the specific heats and the volumes of both the carbon fiber and the epoxy resin, as constitute materials, were comprehensively considered in adopting the methods of theoretical models and FEA. In this paper, theoretical model and finite element analysis method are used to study the thermal conductivity and specific heat of CFRP composite, which shows that they have good consistency, and also shows the accuracy and rationality of the finite element analysis results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The thermal conductivities of CFRP composites in the direction parallel to the fiber and vertical to the fiber and the specific heat of it were investigated by the methods of theoretical models and finite element analysis (FEA). The thermal conductivities, the specific heats and the volumes of both the carbon fiber and the epoxy resin, as constitute materials, were comprehensively considered in adopting the methods of theoretical models and FEA. In this paper, theoretical model and finite element analysis method are used to study the thermal conductivity and specific heat of CFRP composite, which shows that they have good consistency, and also shows the accuracy and rationality of the finite element analysis results.",
"fno": "828800a771",
"keywords": [
"Carbon Fibre Reinforced Plastics",
"Composite Materials",
"Finite Element Analysis",
"Resins",
"Specific Heat",
"Thermal Conductivity",
"Representative Volume Element",
"Epoxy Resin",
"Carbon Fiber Reinforced Plastic Composites",
"Finite Element Analysis",
"CFRP Composite",
"Thermal Conductivity",
"Specific Heat",
"Heating Systems",
"Analytical Models",
"Solid Modeling",
"Epoxy Resins",
"Conductivity",
"Thermal Conductivity",
"Fiber Reinforced Plastics",
"Carbon Fiber Reinforced Plastic CFRP Composites",
"Thermal Conductivity",
"Specific Heat",
"Finite Element Analysis",
"Representative Volume Element RVE"
],
"authors": [
{
"affiliation": "Shanghai University",
"fullName": "Chupeng He",
"givenName": "Chupeng",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai University",
"fullName": "Jingjing Xu",
"givenName": "Jingjing",
"surname": "Xu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aiea",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "771-774",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-8288-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "828800a767",
"articleId": "1nTug0qHvKo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "828800a775",
"articleId": "1nTuljCQOvm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2010/3962/3/3962e382",
"title": "Simulation Study on the Characteristics of Carbon-Fiber-Reinforced Plastics in Electromagnetic Tomography Nondestructive Evaluation Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962e382/12OmNqBtiZG",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/3",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999854",
"title": "High Thermal Conductivity Mold Compounds for Advanced Packaging Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999854/12OmNqyUUGO",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156377",
"title": "MetaTracts - A method for robust extraction and visualization of carbon fiber bundles in fiber reinforced composites",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156377/12OmNrYCXXM",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2010/4212/1/4212a709",
"title": "Experimental Study on Reinforced Concrete Column Strengthened by CFRP under Bidirectional Eccentric Load",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2010/4212a709/12OmNvStcvG",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isai/2016/1585/0/1585a446",
"title": "Damage Detection in Cross-Ply CFRP Based on Open Electrical Impedance Tomography",
"doi": null,
"abstractUrl": "/proceedings-article/isai/2016/1585a446/12OmNzE54Cn",
"parentPublication": {
"id": "proceedings/isai/2016/1585/0",
"title": "2016 International Conference on Information System and Artificial Intelligence (ISAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2015/7644/0/7644a420",
"title": "Experiments on Secondary Loaded RC Tensile Members Strengthened with CFRP Sheets",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2015/7644a420/12OmNzmLxOJ",
"parentPublication": {
"id": "proceedings/icicta/2015/7644/0",
"title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07493610",
"title": "Interactive Exploration and Visualization Using MetaTracts extracted from Carbon Fiber Reinforced Composites",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07493610/13rRUwj7cpg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2019/1307/0/130700a098",
"title": "Experimental Study on CFRP Strengthened Continuous Reinforced Concrete Slab After Fire",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2019/130700a098/18Av0a78YM0",
"parentPublication": {
"id": "proceedings/icitbs/2019/1307/0",
"title": "2019 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicas/2019/6106/0/610600a865",
"title": "Lightning Discharge Characteristics of Carbon Fiber Reinforced Polymer (CFRP) Wind Turbine Blade",
"doi": null,
"abstractUrl": "/proceedings-article/icicas/2019/610600a865/1iHV0dp3UfC",
"parentPublication": {
"id": "proceedings/icicas/2019/6106/0",
"title": "2019 International Conference on Intelligent Computing, Automation and Systems (ICICAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2021/4486/0/448600a075",
"title": "Progressive Damage Analysis of CFRP Laminates Under Three-Point Bending Load",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2021/448600a075/1yEZikpF5pm",
"parentPublication": {
"id": "proceedings/iccnea/2021/4486/0",
"title": "2021 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1rvCw3aIRdC",
"title": "2020 7th International Forum on Electrical Engineering and Automation (IFEEA)",
"acronym": "ifeea",
"groupId": "1840345",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1rvCGNgKmE8",
"doi": "10.1109/IFEEA51475.2020.00057",
"title": "Tension Fatigue Behavior of Carbon Fiber Reinforced Resin Matrix Composites under Spectrum Load",
"normalizedTitle": "Tension Fatigue Behavior of Carbon Fiber Reinforced Resin Matrix Composites under Spectrum Load",
"abstract": "Tension-Tension fatigue tests of carbon fiber/epoxy composite blades were studied in this paper. The S-N curve was plotted to predict the fatigue life. Fracture surfaces were observed by SEM to analyze the failure mode under the variable spectrum. The results show that the conditional fatigue limit of the carbon fiber/epoxy composite blade is 302MPa. The major failure modes of the bolted joints include the first resin matrix microcracking, fiber-matrix splitting, delamination and fiber breakage, these failure modes interact and expand until the material fracture occurs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Tension-Tension fatigue tests of carbon fiber/epoxy composite blades were studied in this paper. The S-N curve was plotted to predict the fatigue life. Fracture surfaces were observed by SEM to analyze the failure mode under the variable spectrum. The results show that the conditional fatigue limit of the carbon fiber/epoxy composite blade is 302MPa. The major failure modes of the bolted joints include the first resin matrix microcracking, fiber-matrix splitting, delamination and fiber breakage, these failure modes interact and expand until the material fracture occurs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Tension-Tension fatigue tests of carbon fiber/epoxy composite blades were studied in this paper. The S-N curve was plotted to predict the fatigue life. Fracture surfaces were observed by SEM to analyze the failure mode under the variable spectrum. The results show that the conditional fatigue limit of the carbon fiber/epoxy composite blade is 302MPa. The major failure modes of the bolted joints include the first resin matrix microcracking, fiber-matrix splitting, delamination and fiber breakage, these failure modes interact and expand until the material fracture occurs.",
"fno": "962700a239",
"keywords": [
"Blades",
"Carbon Fibre Reinforced Composites",
"Delamination",
"Failure Analysis",
"Fatigue Cracks",
"Fatigue Testing",
"Filled Polymers",
"Fracture",
"Microcracks",
"Resins",
"Tensile Strength",
"Carbon Fiber Reinforced Resin Matrix Composites",
"Spectrum Load",
"Tension Tension Fatigue Tests",
"Fatigue Life Prediction",
"Failure Mode",
"Conditional Fatigue Limit",
"Resin Matrix Microcracking",
"Fiber Matrix Splitting",
"Delamination",
"Fiber Breakage",
"Tension Fatigue Behavior",
"S N Curve",
"Fracture Surfaces",
"Pressure 302 0 M Pa",
"Scanning Electron Microscopy",
"Blades",
"Surface Morphology",
"Fatigue",
"Resins",
"Optical Fiber Testing",
"Carbon",
"Bolted Joints",
"Variable Amplitude Spectrum Fatigue",
"Composite Materials",
"S N Curve",
"Failure Analysis"
],
"authors": [
{
"affiliation": "Shanghai Marine Diesel Engine Research Institute,Shanghai,China",
"fullName": "Wuchao Chen",
"givenName": "Wuchao",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Marine Diesel Engine Research Institute,Shanghai,China",
"fullName": "Yuan Liu",
"givenName": "Yuan",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Marine Diesel Engine Research Institute,Shanghai,China",
"fullName": "Jin Zhang",
"givenName": "Jin",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Marine Diesel Engine Research Institute,Shanghai,China",
"fullName": "Junjie Deng",
"givenName": "Junjie",
"surname": "Deng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Marine Diesel Engine Research Institute,Shanghai,China",
"fullName": "Min Xie",
"givenName": "Min",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Marine Diesel Engine Research Institute,Shanghai,China",
"fullName": "Wei Jing",
"givenName": "Wei",
"surname": "Jing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Aeronautics and Astronautics, Shanghai Jiaotong University,Shanghai,China",
"fullName": "Yecheng Lin",
"givenName": "Yecheng",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Aeronautics and Astronautics, Shanghai Jiaotong University,Shanghai,China",
"fullName": "Xiaojing Zhang",
"givenName": "Xiaojing",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ifeea",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-09-01T00:00:00",
"pubType": "proceedings",
"pages": "239-243",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9627-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "962700a235",
"articleId": "1rvCAPUVm1i",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "962700a244",
"articleId": "1rvCDlEaZrO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icicta/2014/6636/0/6636a905",
"title": "Analysis on Fatigue Load Effects of Bridge Cables Based on Traffic Load Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2014/6636a905/12OmNAPjA6x",
"parentPublication": {
"id": "proceedings/icicta/2014/6636/0",
"title": "2014 7th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999735",
"title": "Correlation of Dielectric Film Flex Fatigue Resistance and Package Resin Cracking Failure",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999735/12OmNC4wtMe",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecodesign/2001/1266/0/00992397",
"title": "Effects of shot peening treatment on very long life fatigue property in ductile cast irons",
"doi": null,
"abstractUrl": "/proceedings-article/ecodesign/2001/00992397/12OmNwdL7uf",
"parentPublication": {
"id": "proceedings/ecodesign/2001/1266/0",
"title": "Environmentally Conscious Design and Inverse Manufacturing, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2009/3804/4/3804e517",
"title": "Fatigue Analysis of Cement-Stabilized Macadam Using Fracture Toughness",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2009/3804e517/12OmNybfqWM",
"parentPublication": {
"id": "proceedings/icicta/2009/3804/4",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecodim/2001/1266/0/00992397",
"title": "Effects of shot peening treatment on very long life fatigue property in ductile cast irons",
"doi": null,
"abstractUrl": "/proceedings-article/ecodim/2001/00992397/12OmNyuy9Q5",
"parentPublication": {
"id": "proceedings/ecodim/2001/1266/0",
"title": "Proceedings Second International Symposium on Environmentally Conscious Design and Inverse Manufacturing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999979",
"title": "A Study on the Fabrication of Electrical Circuits on Fabrics Using Cu Pattern Laminated B-Stage Adhesive Films for Electronic Textile Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999979/12OmNzTH0FG",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2019/5045/0/504500a090",
"title": "Research on Fatigue Reliability of Knotter Frame",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2019/504500a090/1hHLoBv7jBC",
"parentPublication": {
"id": "proceedings/wcmeim/2019/5045/0",
"title": "2019 2nd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2020/8145/0/09122067",
"title": "High Cycle Fatigue Behavior of High Strength Steel Q960",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2020/09122067/1kRSzEQ3MCA",
"parentPublication": {
"id": "proceedings/icedme/2020/8145/0",
"title": "2020 3rd International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiea/2020/8288/0/828800a771",
"title": "Finite Element Analysis of the Thermal Conductivity and the Specific Heat of Carbon Fiber Reinforced Plastic (CFRP) Composites",
"doi": null,
"abstractUrl": "/proceedings-article/aiea/2020/828800a771/1nTul2L8zjG",
"parentPublication": {
"id": "proceedings/aiea/2020/8288/0",
"title": "2020 International Conference on Artificial Intelligence and Electromechanical Automation (AIEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2021/3596/0/359600a292",
"title": "Low cycle fatigue damage of gun barrel and its monitoring for prevention of fracture",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2021/359600a292/1tMPRlUfUwU",
"parentPublication": {
"id": "proceedings/icedme/2021/3596/0",
"title": "2021 4th International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyQYtf2",
"title": "2017 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCu4nbZ",
"doi": "10.1109/3DV.2017.00018",
"title": "3D Shape Reconstruction from Sketches via Multi-view Convolutional Networks",
"normalizedTitle": "3D Shape Reconstruction from Sketches via Multi-view Convolutional Networks",
"abstract": "We propose a method for reconstructing 3D shapes from 2D sketches in the form of line drawings. Our method takes as input a single sketch, or multiple sketches, and outputs a dense point cloud representing a 3D reconstruction of the input sketch(es). The point cloud is then converted into a polygon mesh. At the heart of our method lies a deep, encoder-decoder network. The encoder converts the sketch into a compact representation encoding shape information. The decoder converts this representation into depth and normal maps capturing the underlying surface from several output viewpoints. The multi-view maps are then consolidated into a 3D point cloud by solving an optimization problem that fuses depth and normals across all viewpoints. Based on our experiments, compared to other methods, such as volumetric networks, our architecture offers several advantages, including more faithful reconstruction, higher output surface resolution, better preservation of topology and shape structure.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a method for reconstructing 3D shapes from 2D sketches in the form of line drawings. Our method takes as input a single sketch, or multiple sketches, and outputs a dense point cloud representing a 3D reconstruction of the input sketch(es). The point cloud is then converted into a polygon mesh. At the heart of our method lies a deep, encoder-decoder network. The encoder converts the sketch into a compact representation encoding shape information. The decoder converts this representation into depth and normal maps capturing the underlying surface from several output viewpoints. The multi-view maps are then consolidated into a 3D point cloud by solving an optimization problem that fuses depth and normals across all viewpoints. Based on our experiments, compared to other methods, such as volumetric networks, our architecture offers several advantages, including more faithful reconstruction, higher output surface resolution, better preservation of topology and shape structure.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a method for reconstructing 3D shapes from 2D sketches in the form of line drawings. Our method takes as input a single sketch, or multiple sketches, and outputs a dense point cloud representing a 3D reconstruction of the input sketch(es). The point cloud is then converted into a polygon mesh. At the heart of our method lies a deep, encoder-decoder network. The encoder converts the sketch into a compact representation encoding shape information. The decoder converts this representation into depth and normal maps capturing the underlying surface from several output viewpoints. The multi-view maps are then consolidated into a 3D point cloud by solving an optimization problem that fuses depth and normals across all viewpoints. Based on our experiments, compared to other methods, such as volumetric networks, our architecture offers several advantages, including more faithful reconstruction, higher output surface resolution, better preservation of topology and shape structure.",
"fno": "261001a067",
"keywords": [
"Image Reconstruction",
"Image Representation",
"Solid Modelling",
"Depth",
"Normal Maps",
"Output Viewpoints",
"Multiview Maps",
"Normals",
"Volumetric Networks",
"Faithful Reconstruction",
"Higher Output Surface Resolution",
"Topology",
"Shape Structure",
"3 D Shape Reconstruction",
"Multiview Convolutional Networks",
"Line Drawings",
"Single Sketch",
"Multiple Sketches",
"Dense Point Cloud",
"Input Sketch Es",
"Polygon Mesh",
"Encoder Decoder Network",
"Compact Representation Encoding Shape Information",
"Shape",
"Three Dimensional Displays",
"Image Reconstruction",
"Surface Reconstruction",
"Decoding",
"Solid Modeling",
"Image Color Analysis",
"Sketch Modeling",
"Shape Reconstruction",
"Convolutional Networks"
],
"authors": [
{
"affiliation": null,
"fullName": "Zhaoliang Lun",
"givenName": "Zhaoliang",
"surname": "Lun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Matheus Gadelha",
"givenName": "Matheus",
"surname": "Gadelha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Evangelos Kalogerakis",
"givenName": "Evangelos",
"surname": "Kalogerakis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Subhransu Maji",
"givenName": "Subhransu",
"surname": "Maji",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rui Wang",
"givenName": "Rui",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "67-77",
"year": "2017",
"issn": "2475-7888",
"isbn": "978-1-5386-2610-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "261001a057",
"articleId": "12OmNxFJXuz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "261001a078",
"articleId": "12OmNB7LvAf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2017/6067/0/08019464",
"title": "Multi-view pairwise relationship learning for sketch based 3D shape retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019464/12OmNy6Zs2q",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2011/06/mcg2011060024",
"title": "NaturaSketch: Modeling from Images and Natural Sketches",
"doi": null,
"abstractUrl": "/magazine/cg/2011/06/mcg2011060024/13rRUIJcWfV",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07530838",
"title": "Shape Synthesis from Sketches via Procedural Models and Convolutional Networks",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07530838/13rRUxOdD2K",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2018/9497/0/949700a311",
"title": "Sketch-Based Shape Retrieval via Multi-view Attention and Generalized Similarity",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2018/949700a311/17D45VObpQZ",
"parentPublication": {
"id": "proceedings/icdh/2018/9497/0",
"title": "2018 7th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3003",
"title": "Sketch2Mesh: Reconstructing and Editing 3D Shapes from Sketches",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3003/1BmJsHikEfu",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08812900",
"title": "High-Quality Textured 3D Shape Reconstruction with Cascaded Fully Convolutional Networks",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08812900/1cPXsi4lQqc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2019/2297/0/229700a069",
"title": "Query by Partially-Drawn Sketches for 3D Shape Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2019/229700a069/1fHkpp4xIJi",
"parentPublication": {
"id": "proceedings/cw/2019/2297/0",
"title": "2019 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2020/9234/0/923400a184",
"title": "Deep 3D Shape Reconstruction from Single-View Sketch Image",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2020/923400a184/1uGY2GTiIda",
"parentPublication": {
"id": "proceedings/icdh/2020/9234/0",
"title": "2020 8th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2020/8666/0/866600a223",
"title": "Sketch-based 3D Shape Retrieval with Multi-Silhouette View Based on Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2020/866600a223/1wRIvGNgH9m",
"parentPublication": {
"id": "proceedings/icicta/2020/8666/0",
"title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900g008",
"title": "Sketch2Model: View-Aware 3D Modeling from Single Free-Hand Sketches",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900g008/1yeM42IPa3S",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1fHHpVzB0CA",
"title": "2019 18th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"acronym": "sbgames",
"groupId": "1800056",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1fHHpYoajCg",
"doi": "10.1109/SBGames.2019.00015",
"title": "A Survey of Procedural Dungeon Generation",
"normalizedTitle": "A Survey of Procedural Dungeon Generation",
"abstract": "Procedural content generation (PCG) is a method of content creation fully or semi-performed by computers. PCG is widely used in game development to generate game content, from Rogue (1998) to No Man's Sky (2016). PCG generates final contents, which are ready to be added to a game, or intermediate contents, which are might work as content sketch to be polished by human designers. In this paper we survey the current state of procedural dungeon generation (PDG) research, a subarea of PCG. We analyzed the works according to the game features they generate, the solution strategy employed and the taxonomy of procedural content generation. Some of the relevant findings of the survey are: (1) PDG for 3D levels has been little explored; (2) few works supported levels with barriers, a game mechanic which blocks, temporarily, the player progression, and; (3) and just a few solutions relied on mixed-initiative approach, where a human design content is combined with a computer generated level.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Procedural content generation (PCG) is a method of content creation fully or semi-performed by computers. PCG is widely used in game development to generate game content, from Rogue (1998) to No Man's Sky (2016). PCG generates final contents, which are ready to be added to a game, or intermediate contents, which are might work as content sketch to be polished by human designers. In this paper we survey the current state of procedural dungeon generation (PDG) research, a subarea of PCG. We analyzed the works according to the game features they generate, the solution strategy employed and the taxonomy of procedural content generation. Some of the relevant findings of the survey are: (1) PDG for 3D levels has been little explored; (2) few works supported levels with barriers, a game mechanic which blocks, temporarily, the player progression, and; (3) and just a few solutions relied on mixed-initiative approach, where a human design content is combined with a computer generated level.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Procedural content generation (PCG) is a method of content creation fully or semi-performed by computers. PCG is widely used in game development to generate game content, from Rogue (1998) to No Man's Sky (2016). PCG generates final contents, which are ready to be added to a game, or intermediate contents, which are might work as content sketch to be polished by human designers. In this paper we survey the current state of procedural dungeon generation (PDG) research, a subarea of PCG. We analyzed the works according to the game features they generate, the solution strategy employed and the taxonomy of procedural content generation. Some of the relevant findings of the survey are: (1) PDG for 3D levels has been little explored; (2) few works supported levels with barriers, a game mechanic which blocks, temporarily, the player progression, and; (3) and just a few solutions relied on mixed-initiative approach, where a human design content is combined with a computer generated level.",
"fno": "463700a029",
"keywords": [
"Computer Games",
"Procedural Dungeon Generation",
"Procedural Content Generation",
"PCG",
"Game Content",
"Human Design Content",
"PDG",
"Games",
"Two Dimensional Displays",
"Genetic Algorithms",
"Taxonomy",
"Three Dimensional Displays",
"Grammar",
"Entertainment Industry",
"Survey",
"Procedural Content Generation",
"Game Content Generation",
"Procedural Dungeon Generation"
],
"authors": [
{
"affiliation": "Universidade Federal do Rio Grande do Norte",
"fullName": "Breno M. F. Viana",
"givenName": "Breno M. F.",
"surname": "Viana",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidade Federal do Rio Grande do Norte",
"fullName": "Selan R. dos Santos",
"givenName": "Selan R.",
"surname": "dos Santos",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sbgames",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "29-38",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4637-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "463700a021",
"articleId": "1fHHqBDDrJm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "463700a039",
"articleId": "1fHHreI2dLW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sbgames/2017/4846/0/484601a010",
"title": "A Fast Approach for Automatic Generation of Populated Maps with Seed and Difficulty Control",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2017/484601a010/12OmNxA3Z93",
"parentPublication": {
"id": "proceedings/sbgames/2017/4846/0",
"title": "2017 16th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2017/4846/0/484601a116",
"title": "Improving Procedural 2D Map Generation Based on Multi-Layered Cellular Automata and Hilbert Curves",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2017/484601a116/12OmNzIUfZh",
"parentPublication": {
"id": "proceedings/sbgames/2017/4846/0",
"title": "2017 16th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2016/03/07066888",
"title": "Petalz: Search-Based Procedural Content Generation for the Casual Gamer",
"doi": null,
"abstractUrl": "/journal/ci/2016/03/07066888/13rRUxAStUJ",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2015/01/06853332",
"title": "Learning-based procedural content generation",
"doi": null,
"abstractUrl": "/journal/ci/2015/01/06853332/13rRUxYINaS",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2011/03/05756645",
"title": "Search-Based Procedural Content Generation: A Taxonomy and Survey",
"doi": null,
"abstractUrl": "/journal/ci/2011/03/05756645/13rRUxYrbX8",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2014/01/06661386",
"title": "Procedural Generation of Dungeons",
"doi": null,
"abstractUrl": "/journal/ci/2014/01/06661386/13rRUyoyhGd",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2011/03/tta2011030147",
"title": "Experience-Driven Procedural Content Generation",
"doi": null,
"abstractUrl": "/journal/ta/2011/03/tta2011030147/13rRUytnsVi",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a186",
"title": "A Survey of Procedural Content Generation for Games",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a186/1H2pn5E6Upy",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2019/5434/0/543400a147",
"title": "Procedural Content Generation using Artificial Intelligence for Unique Virtual Reality Game Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2019/543400a147/1fHjxk3ixna",
"parentPublication": {
"id": "proceedings/svr/2019/5434/0",
"title": "2019 21st Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gi/2021/4466/0/446600a039",
"title": "(Genetically) Improving Novelty in Procedural Story Generation",
"doi": null,
"abstractUrl": "/proceedings-article/gi/2021/446600a039/1v2QLKRKBiM",
"parentPublication": {
"id": "proceedings/gi/2021/4466/0",
"title": "2021 IEEE/ACM International Workshop on Genetic Improvement (GI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCeaPZC",
"title": "2010 IEEE 39th Applied Imagery Pattern Recognition Workshop (AIPR)",
"acronym": "aipr",
"groupId": "1000046",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB836JG",
"doi": "10.1109/AIPR.2010.5759692",
"title": "Pre-attentive detection of depth saliency using stereo vision",
"normalizedTitle": "Pre-attentive detection of depth saliency using stereo vision",
"abstract": "A quick estimation of depth is required by artificial vision systems for their self survival and navigation through the environment. Following the selection strategy of biological vision, known as visual attention, can help in accelerating extraction of depth for important and relevant portions of given scenes. Recent studies on depth perception in biological vision indicate that disparity is computed using object detection in the brain. The proposed method uses concepts from these studies and determines the shift that objects go through in the stereo frames using data regarding their borders. This enables efficient creation of depth saliency map for artificial visual attention. Results of the proposed model have shown success in selecting those locations from stereo scenes that are salient for human perception in terms of depth.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A quick estimation of depth is required by artificial vision systems for their self survival and navigation through the environment. Following the selection strategy of biological vision, known as visual attention, can help in accelerating extraction of depth for important and relevant portions of given scenes. Recent studies on depth perception in biological vision indicate that disparity is computed using object detection in the brain. The proposed method uses concepts from these studies and determines the shift that objects go through in the stereo frames using data regarding their borders. This enables efficient creation of depth saliency map for artificial visual attention. Results of the proposed model have shown success in selecting those locations from stereo scenes that are salient for human perception in terms of depth.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A quick estimation of depth is required by artificial vision systems for their self survival and navigation through the environment. Following the selection strategy of biological vision, known as visual attention, can help in accelerating extraction of depth for important and relevant portions of given scenes. Recent studies on depth perception in biological vision indicate that disparity is computed using object detection in the brain. The proposed method uses concepts from these studies and determines the shift that objects go through in the stereo frames using data regarding their borders. This enables efficient creation of depth saliency map for artificial visual attention. Results of the proposed model have shown success in selecting those locations from stereo scenes that are salient for human perception in terms of depth.",
"fno": "05759692",
"keywords": [
"Brain",
"Feature Extraction",
"Medical Image Processing",
"Object Detection",
"Stereo Image Processing",
"Visual Perception",
"Preattentive Detection",
"Depth Saliency Map",
"Stereo Vision",
"Depth Estimation",
"Artificial Vision System",
"Self Survival",
"Navigation",
"Biological Vision",
"Visual Attention",
"Depth Extraction",
"Depth Perception",
"Object Detection",
"Brain",
"Stereo Frame",
"Stereo Scene",
"Human Perception",
"Stereo Image Processing",
"Strips",
"Pixel",
"Visualization",
"Humans",
"Stereo Vision",
"Image Segmentation",
"Image Color Analysis",
"Visual Attention Modeling",
"Depth Saliency",
"Stereo Image Processing"
],
"authors": [
{
"affiliation": "GET Lab, Paderborn University, 33098 Paderborn, Germany",
"fullName": "M. Zaheer Aziz",
"givenName": "M. Zaheer",
"surname": "Aziz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "GET Lab, Paderborn University, 33098 Paderborn, Germany",
"fullName": "Bärbel Mertsching",
"givenName": "Bärbel",
"surname": "Mertsching",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aipr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2010",
"issn": "1550-5219",
"isbn": "978-1-4244-8833-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05759691",
"articleId": "12OmNAsk4zY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05759693",
"articleId": "12OmNyGKUmJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/06977434",
"title": "Depth Super-resolution by Fusing Depth Imaging and Stereo Vision with Structural Determinant Information Inference",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977434/12OmNAXPymB",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicrosp/1996/7456/0/74560360",
"title": "A Distributed Adaptive Architecture for Analog Stereo Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/nicrosp/1996/74560360/12OmNBlXs6D",
"parentPublication": {
"id": "proceedings/nicrosp/1996/7456/0",
"title": "Neural Networks for Identification, Control, and Robotics, International Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130497",
"title": "Stereo estimation of depth along virtual cut planes",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130497/12OmNBqMDEE",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1988/0878/0/00028242",
"title": "On stereo image coding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1988/00028242/12OmNwEJ0Qi",
"parentPublication": {
"id": "proceedings/icpr/1988/0878/0",
"title": "9th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2004/2159/0/01384832",
"title": "Tyzx DeepSea High Speed Stereo Vision System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2004/01384832/12OmNx0A7LZ",
"parentPublication": {
"id": "proceedings/cvprw/2004/2159/0",
"title": "2004 Conference on Computer Vision and Pattern Recognition Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iai/1994/6250/0/00336690",
"title": "Stereo vision using Gabor wavelets",
"doi": null,
"abstractUrl": "/proceedings-article/iai/1994/00336690/12OmNzYeB1p",
"parentPublication": {
"id": "proceedings/iai/1994/6250/0",
"title": "Proceedings of the IEEE Southwest Symposium on Image Analysis and Interpretation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07544591",
"title": "Stereoscopic Thumbnail Creation via Efficient Stereo Saliency Detection",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07544591/13rRUyfbwqP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412461",
"title": "Enhancing Depth Quality of Stereo Vision using Deep Learning-based Prior Information of the Driving Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412461/1tmiFt2l7s4",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2021/0032/0/09555517",
"title": "Sharpness Enhancement of Stereo Images Using a Depth-Based Per-Pixel Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2021/09555517/1xxcmkuKABa",
"parentPublication": {
"id": "proceedings/iisa/2021/0032/0",
"title": "2021 12th International Conference on Information, Intelligence, Systems & Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBDyAaZ",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvAiSNK",
"doi": "10.1109/ICCV.2015.37",
"title": "Automatic Thumbnail Generation Based on Visual Representativeness and Foreground Recognizability",
"normalizedTitle": "Automatic Thumbnail Generation Based on Visual Representativeness and Foreground Recognizability",
"abstract": "We present an automatic thumbnail generation technique based on two essential considerations: how well they visually represent the original photograph, and how well the foreground can be recognized after the cropping and downsizing steps of thumbnailing. These factors, while important for the image indexing purpose of thumbnails, have largely been ignored in previous methods, which instead are designed to highlight salient content while disregarding the effects of downsizing. We propose a set of image features for modeling these two considerations of thumbnails, and learn how to balance their relative effects on thumbnail generation through training on image pairs composed of photographs and their corresponding thumbnails created by an expert photographer. Experiments show the effectiveness of this approach on a variety of images, as well as its advantages over related techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an automatic thumbnail generation technique based on two essential considerations: how well they visually represent the original photograph, and how well the foreground can be recognized after the cropping and downsizing steps of thumbnailing. These factors, while important for the image indexing purpose of thumbnails, have largely been ignored in previous methods, which instead are designed to highlight salient content while disregarding the effects of downsizing. We propose a set of image features for modeling these two considerations of thumbnails, and learn how to balance their relative effects on thumbnail generation through training on image pairs composed of photographs and their corresponding thumbnails created by an expert photographer. Experiments show the effectiveness of this approach on a variety of images, as well as its advantages over related techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an automatic thumbnail generation technique based on two essential considerations: how well they visually represent the original photograph, and how well the foreground can be recognized after the cropping and downsizing steps of thumbnailing. These factors, while important for the image indexing purpose of thumbnails, have largely been ignored in previous methods, which instead are designed to highlight salient content while disregarding the effects of downsizing. We propose a set of image features for modeling these two considerations of thumbnails, and learn how to balance their relative effects on thumbnail generation through training on image pairs composed of photographs and their corresponding thumbnails created by an expert photographer. Experiments show the effectiveness of this approach on a variety of images, as well as its advantages over related techniques.",
"fno": "8391a253",
"keywords": [
"Agriculture",
"Visualization",
"Image Color Analysis",
"Training",
"Feature Extraction",
"Image Edge Detection",
"Measurement"
],
"authors": [
{
"affiliation": null,
"fullName": "Jingwei Huang",
"givenName": "Jingwei",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Huarong Chen",
"givenName": "Huarong",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bin Wang",
"givenName": "Bin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Stephen Lin",
"givenName": "Stephen",
"surname": "Lin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "253-261",
"year": "2015",
"issn": "2380-7504",
"isbn": "978-1-4673-8391-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8391a244",
"articleId": "12OmNrkBwqy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8391a262",
"articleId": "12OmNx8fiir",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2012/4711/0/4711a848",
"title": "3D Storyboards for Interactive Visual Search",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a848/12OmNqBtiL3",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/1995/7201/0/72010115",
"title": "Case study: an empirical investigation of thumbnail image recognition",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/1995/72010115/12OmNrYlmFF",
"parentPublication": {
"id": "proceedings/ieee-infovis/1995/7201/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2012/2027/0/06266442",
"title": "A Visual Search User Study on the Influences of Aspect Ratio Distortion of Preview Thumbnails",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2012/06266442/12OmNx4gUkE",
"parentPublication": {
"id": "proceedings/icmew/2012/2027/0",
"title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/08099928",
"title": "Fast-At: Fast Automatic Thumbnail Generation Using Deep Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/08099928/12OmNxR5UGZ",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07298994",
"title": "Multi-task deep visual-semantic embedding for video thumbnail selection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298994/12OmNy2agPU",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607401",
"title": "Super-resolution for low quality thumbnail images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607401/12OmNyqRno7",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07544591",
"title": "Stereoscopic Thumbnail Creation via Efficient Stereo Saliency Detection",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07544591/13rRUyfbwqP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a054",
"title": "Thumbnail Image Selection for VOD Services",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a054/19wB2M1XXAA",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0/08726759",
"title": "An Improved Automatic Thumbnail Generation Algorithm Based on Interpolation Technique",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2018/08726759/1axfscz4YoM",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0",
"title": "2018 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2020/9234/0/923400a254",
"title": "Automatic Generation of Informative Video Thumbnail",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2020/923400a254/1uGXYkWM0Ra",
"parentPublication": {
"id": "proceedings/icdh/2020/9234/0",
"title": "2020 8th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAY79oS",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvjgWv5",
"doi": "10.1109/ICME.2014.6890321",
"title": "Learning to detect stereo saliency",
"normalizedTitle": "Learning to detect stereo saliency",
"abstract": "This paper develops a novel learning-based method for detecting stereo saliency in stereopair images. The disparity maps computed from stereopair images provide an additional depth cue for stereo saliency detection. To the best of our knowledge, our approach is the first one to simultaneously detect the stereo saliency of both left and right images using support vector machine (SVM). In our work, the disparity maps are used in two aspects. One is to improve the performance of saliency detection for monocular image. The other one is to maintain the consistency between the stereo matching and saliency maps. In order to meet the above requirements, we propose a new combinational saliency feature to train the stereo images with the labeled saliency ground truth, using support vector machine as the classifier. In the test stage, our approach generates the stereo saliency results according to the trained SVM model. Furthermore, a stereopair saliency dataset containing 400 pairs of images is created to perform the challenging experiments. The experimental results have demonstrated that our method achieves better performance than the state-of-the-art algorithms of single-image saliency detection.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper develops a novel learning-based method for detecting stereo saliency in stereopair images. The disparity maps computed from stereopair images provide an additional depth cue for stereo saliency detection. To the best of our knowledge, our approach is the first one to simultaneously detect the stereo saliency of both left and right images using support vector machine (SVM). In our work, the disparity maps are used in two aspects. One is to improve the performance of saliency detection for monocular image. The other one is to maintain the consistency between the stereo matching and saliency maps. In order to meet the above requirements, we propose a new combinational saliency feature to train the stereo images with the labeled saliency ground truth, using support vector machine as the classifier. In the test stage, our approach generates the stereo saliency results according to the trained SVM model. Furthermore, a stereopair saliency dataset containing 400 pairs of images is created to perform the challenging experiments. The experimental results have demonstrated that our method achieves better performance than the state-of-the-art algorithms of single-image saliency detection.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper develops a novel learning-based method for detecting stereo saliency in stereopair images. The disparity maps computed from stereopair images provide an additional depth cue for stereo saliency detection. To the best of our knowledge, our approach is the first one to simultaneously detect the stereo saliency of both left and right images using support vector machine (SVM). In our work, the disparity maps are used in two aspects. One is to improve the performance of saliency detection for monocular image. The other one is to maintain the consistency between the stereo matching and saliency maps. In order to meet the above requirements, we propose a new combinational saliency feature to train the stereo images with the labeled saliency ground truth, using support vector machine as the classifier. In the test stage, our approach generates the stereo saliency results according to the trained SVM model. Furthermore, a stereopair saliency dataset containing 400 pairs of images is created to perform the challenging experiments. The experimental results have demonstrated that our method achieves better performance than the state-of-the-art algorithms of single-image saliency detection.",
"fno": "06890321",
"keywords": [
"Support Vector Machines",
"Feature Extraction",
"Bayes Methods",
"Training",
"Learning Systems",
"Principal Component Analysis",
"Visualization",
"Feature Detection",
"Stereo Saliency Detection",
"Stereopair Images",
"Support Vector Machine"
],
"authors": [
{
"affiliation": "Beijing Key Lab of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, Beijing 100081, China",
"fullName": "Fang Guo",
"givenName": "Fang",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Key Lab of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, Beijing 100081, China",
"fullName": "Jianbing Shen",
"givenName": "Jianbing",
"surname": "Shen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for OPTical IMagery Analysis and Learning (OPTIMAL), State Key Laboratory of Transient Optics and Photonics, Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi'an 710119, Shaanxi, P. R. China",
"fullName": "Xuelong Li",
"givenName": "Xuelong",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4761-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06890320",
"articleId": "12OmNzuIjfZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06890322",
"articleId": "12OmNvjyxUU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2013/4983/0/4983a075",
"title": "Existence Detection of Objects in Images for Robot Vision Using Saliency Histogram Features",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2013/4983a075/12OmNAoDi9i",
"parentPublication": {
"id": "proceedings/crv/2013/4983/0",
"title": "2013 International Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209d428",
"title": "Information Divergence Based Saliency Detection with a Global Center-Surround Mechanism",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209d428/12OmNwlZtZR",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/443P3C15",
"title": "Discriminative spatial saliency for image classification",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/443P3C15/12OmNx6xHtN",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a022",
"title": "Saliency Cut in Stereo Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a022/12OmNxwWoGA",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/058P1B05",
"title": "Leveraging stereopsis for saliency analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/058P1B05/12OmNy50g5B",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607523",
"title": "Saliency map fusion based on rank-one constraint",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607523/12OmNzcPAyO",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07544591",
"title": "Stereoscopic Thumbnail Creation via Efficient Stereo Saliency Detection",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07544591/13rRUyfbwqP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a256",
"title": "Adversarial Learning Based Saliency Detection",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a256/17D45WZZ7F9",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a156",
"title": "Oil Tank Detection via Target-Driven Learning Saliency Model",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a156/17D45XeKgqj",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956579",
"title": "Learning to Predict 3D Mesh Saliency",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956579/1IHpMRBlt04",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwdbV00",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwwd2RX",
"doi": "10.1109/CVPR.2012.6247962",
"title": "Edge-preserving photometric stereo via depth fusion",
"normalizedTitle": "Edge-preserving photometric stereo via depth fusion",
"abstract": "We present a sensor fusion scheme that combines active stereo with photometric stereo. Aiming at capturing full-frame depth for dynamic scenes at a minimum of three lighting conditions, we formulate an iterative optimization scheme that (1) adaptively adjusts the contribution from photometric stereo so that discontinuity can be preserved; (2) detects shadow areas by checking the visibility of the estimated point with respect to the light source, instead of using image-based heuristics; and (3) behaves well for ill-conditioned pixels that are under shadow, which are inevitable in almost any scene. Furthermore, we decompose our non-linear cost function into subproblems that can be optimized efficiently using linear techniques. Experiments show significantly improved results over the previous state-of-the-art in sensor fusion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a sensor fusion scheme that combines active stereo with photometric stereo. Aiming at capturing full-frame depth for dynamic scenes at a minimum of three lighting conditions, we formulate an iterative optimization scheme that (1) adaptively adjusts the contribution from photometric stereo so that discontinuity can be preserved; (2) detects shadow areas by checking the visibility of the estimated point with respect to the light source, instead of using image-based heuristics; and (3) behaves well for ill-conditioned pixels that are under shadow, which are inevitable in almost any scene. Furthermore, we decompose our non-linear cost function into subproblems that can be optimized efficiently using linear techniques. Experiments show significantly improved results over the previous state-of-the-art in sensor fusion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a sensor fusion scheme that combines active stereo with photometric stereo. Aiming at capturing full-frame depth for dynamic scenes at a minimum of three lighting conditions, we formulate an iterative optimization scheme that (1) adaptively adjusts the contribution from photometric stereo so that discontinuity can be preserved; (2) detects shadow areas by checking the visibility of the estimated point with respect to the light source, instead of using image-based heuristics; and (3) behaves well for ill-conditioned pixels that are under shadow, which are inevitable in almost any scene. Furthermore, we decompose our non-linear cost function into subproblems that can be optimized efficiently using linear techniques. Experiments show significantly improved results over the previous state-of-the-art in sensor fusion.",
"fno": "312P3A01",
"keywords": [
"Stereo Image Processing",
"Edge Detection",
"Image Fusion",
"Iterative Methods",
"Optimisation",
"Nonlinear Cost Function",
"Edge Preserving Photometric Stereo",
"Depth Fusion",
"Sensor Fusion Scheme",
"Active Stereo",
"Iterative Optimization Scheme",
"Shadow Area Detection",
"Image Based Heuristics",
"Ill Conditioned Pixels",
"Optimization",
"Light Sources",
"Stereo Vision",
"Lighting",
"Sensor Fusion",
"Image Reconstruction",
"Cameras"
],
"authors": [
{
"affiliation": null,
"fullName": "Huimin Yu",
"givenName": null,
"surname": "Huimin Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "B. Wilburn",
"givenName": "B.",
"surname": "Wilburn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Y. Matsushita",
"givenName": "Y.",
"surname": "Matsushita",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ruigang Yang",
"givenName": null,
"surname": "Ruigang Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mao Ye",
"givenName": null,
"surname": "Mao Ye",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qing Zhang",
"givenName": null,
"surname": "Qing Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2472-2479",
"year": "2012",
"issn": "1063-6919",
"isbn": "978-1-4673-1226-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "311P2C49",
"articleId": "12OmNwwMf2R",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "313P3A02",
"articleId": "12OmNvoWV0O",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/1992/2855/0/00223147",
"title": "Shape reconstruction from photometric stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223147/12OmNAWH9Gg",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1999/0149/1/01491119",
"title": "An Integral Formulation for Differential Photometric Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1999/01491119/12OmNqBKU5y",
"parentPublication": {
"id": "proceedings/cvpr/1999/0149/2",
"title": "Proceedings. 1999 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (Cat. No PR00149)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840b161",
"title": "Multiview Photometric Stereo Using Planar Mesh Parameterization",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840b161/12OmNrAv3Cy",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034c961",
"title": "Depth Super-Resolution Meets Uncalibrated Photometric Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c961/12OmNwwuE3d",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457494",
"title": "Improving photometric stereo with laser sectioning",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457494/12OmNyvY9rH",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/03/ttp2008030548",
"title": "Multiview Photometric Stereo",
"doi": null,
"abstractUrl": "/journal/tp/2008/03/ttp2008030548/13rRUB7a1gX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/07/ttp2011071400",
"title": "Reliability Fusion of Time-of-Flight Depth and Stereo Geometry for High Quality Depth Maps",
"doi": null,
"abstractUrl": "/journal/tp/2011/07/ttp2011071400/13rRUxNW1UW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2014/06/mcg2014060057",
"title": "Multispectral Photometric Stereo for Acquiring High-Fidelity Surface Normals",
"doi": null,
"abstractUrl": "/magazine/cg/2014/06/mcg2014060057/13rRUy3gn2L",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/01/08478369",
"title": "Semi-Calibrated Photometric Stereo",
"doi": null,
"abstractUrl": "/journal/tp/2020/01/08478369/141AnpAbeCh",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a222",
"title": "Photometric Segmentation: Simultaneous Photometric Stereo and Masking",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a222/1ezRCNjuOti",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyoiYVr",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxR5UGZ",
"doi": "10.1109/CVPR.2017.445",
"title": "Fast-At: Fast Automatic Thumbnail Generation Using Deep Neural Networks",
"normalizedTitle": "Fast-At: Fast Automatic Thumbnail Generation Using Deep Neural Networks",
"abstract": "Fast-AT is an automatic thumbnail generation system based on deep neural networks. It is a fully-convolutional deep neural network, which learns specific filters for thumbnails of different sizes and aspect ratios. During inference, the appropriate filter is selected depending on the dimensions of the target thumbnail. Unlike most previous work, Fast-AT does not utilize saliency but addresses the problem directly. In addition, it eliminates the need to conduct region search over the saliency map. The model generalizes to thumbnails of different sizes including those with extreme aspect ratios and can generate thumbnails in real time. A data set of more than 70,000 thumbnail annotations was collected to train Fast-AT. We show competitive results in comparison to existing techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Fast-AT is an automatic thumbnail generation system based on deep neural networks. It is a fully-convolutional deep neural network, which learns specific filters for thumbnails of different sizes and aspect ratios. During inference, the appropriate filter is selected depending on the dimensions of the target thumbnail. Unlike most previous work, Fast-AT does not utilize saliency but addresses the problem directly. In addition, it eliminates the need to conduct region search over the saliency map. The model generalizes to thumbnails of different sizes including those with extreme aspect ratios and can generate thumbnails in real time. A data set of more than 70,000 thumbnail annotations was collected to train Fast-AT. We show competitive results in comparison to existing techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Fast-AT is an automatic thumbnail generation system based on deep neural networks. It is a fully-convolutional deep neural network, which learns specific filters for thumbnails of different sizes and aspect ratios. During inference, the appropriate filter is selected depending on the dimensions of the target thumbnail. Unlike most previous work, Fast-AT does not utilize saliency but addresses the problem directly. In addition, it eliminates the need to conduct region search over the saliency map. The model generalizes to thumbnails of different sizes including those with extreme aspect ratios and can generate thumbnails in real time. A data set of more than 70,000 thumbnail annotations was collected to train Fast-AT. We show competitive results in comparison to existing techniques.",
"fno": "08099928",
"keywords": [
"Convolution",
"Feature Selection",
"Image Filtering",
"Neural Nets",
"Fast Automatic Thumbnail Generation",
"Automatic Thumbnail Generation System",
"Fast AT",
"Convolutional Deep Neural Network",
"Thumbnail Annotations",
"Filter Selection",
"Agriculture",
"Object Detection",
"Proposals",
"Nails",
"Neural Networks",
"Training",
"Machine Learning"
],
"authors": [
{
"affiliation": null,
"fullName": "Seyed A. Esmaeili",
"givenName": "Seyed A.",
"surname": "Esmaeili",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bharat Singh",
"givenName": "Bharat",
"surname": "Singh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Larry S. Davis",
"givenName": "Larry S.",
"surname": "Davis",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "4178-4186",
"year": "2017",
"issn": "1063-6919",
"isbn": "978-1-5386-0457-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08099927",
"articleId": "12OmNzIUfK8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08099929",
"articleId": "12OmNBSjIWf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2016/2491/0/2491a266",
"title": "Performance Evaluation of Bottom-Up Saliency Models for Object Proposal Generation",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a266/12OmNBqv2dW",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a253",
"title": "Automatic Thumbnail Generation Based on Visual Representativeness and Foreground Recognizability",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a253/12OmNvAiSNK",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/jcdl/2017/3861/0/07991599",
"title": "Exploring Salient Thumbnail Generation for Archival Collections Online",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2017/07991599/12OmNwOnn0V",
"parentPublication": {
"id": "proceedings/jcdl/2017/3861/0",
"title": "2017 ACM/IEEE Joint Conference on Digital Libraries (JCDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2011/1101/0/06126409",
"title": "Scale and object aware image retargeting for thumbnail browsing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2011/06126409/12OmNxxNbZ5",
"parentPublication": {
"id": "proceedings/iccv/2011/1101/0",
"title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2018/03/07486110",
"title": "Fast Object Detection at Constrained Energy",
"doi": null,
"abstractUrl": "/journal/ec/2018/03/07486110/13rRUwcAqtu",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07544591",
"title": "Stereoscopic Thumbnail Creation via Efficient Stereo Saliency Detection",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07544591/13rRUyfbwqP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486533",
"title": "From Thumbnails to Summaries-A Single Deep Neural Network to Rule Them All",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486533/14jQfPe2Vcb",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f437",
"title": "Good View Hunting: Learning Photo Composition from Dense View Pairs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f437/17D45VObpOI",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0/08726759",
"title": "An Improved Automatic Thumbnail Generation Algorithm Based on Interpolation Technique",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2018/08726759/1axfscz4YoM",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0",
"title": "2018 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2020/9234/0/923400a254",
"title": "Automatic Generation of Informative Video Thumbnail",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2020/923400a254/1uGXYkWM0Ra",
"parentPublication": {
"id": "proceedings/icdh/2020/9234/0",
"title": "2020 8th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyKJiwN",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxwWoGA",
"doi": "10.1109/ICCVW.2013.10",
"title": "Saliency Cut in Stereo Images",
"normalizedTitle": "Saliency Cut in Stereo Images",
"abstract": "In this paper, we propose a novel saliency-aware stereo images segmentation approach using the high-order energy items, which utilizes the disparity map and statistical information of stereo images to enrich the high-order potentials. To the best of our knowledge, our approach is first one to formulate the automatic stereo cut as the high-order energy optimization problems, which simultaneously segments the foreground objects in left and right images using the proposed high-order energy function. The relationships of stereo correspondence by disparity maps are further employed to enhance the connections between the left and right images. Experimental results demonstrate that the proposed approach can effectively improve the saliency-aware segmentation performance of stereo images.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a novel saliency-aware stereo images segmentation approach using the high-order energy items, which utilizes the disparity map and statistical information of stereo images to enrich the high-order potentials. To the best of our knowledge, our approach is first one to formulate the automatic stereo cut as the high-order energy optimization problems, which simultaneously segments the foreground objects in left and right images using the proposed high-order energy function. The relationships of stereo correspondence by disparity maps are further employed to enhance the connections between the left and right images. Experimental results demonstrate that the proposed approach can effectively improve the saliency-aware segmentation performance of stereo images.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a novel saliency-aware stereo images segmentation approach using the high-order energy items, which utilizes the disparity map and statistical information of stereo images to enrich the high-order potentials. To the best of our knowledge, our approach is first one to formulate the automatic stereo cut as the high-order energy optimization problems, which simultaneously segments the foreground objects in left and right images using the proposed high-order energy function. The relationships of stereo correspondence by disparity maps are further employed to enhance the connections between the left and right images. Experimental results demonstrate that the proposed approach can effectively improve the saliency-aware segmentation performance of stereo images.",
"fno": "3022a022",
"keywords": [
"Image Segmentation",
"Stereo Vision",
"Optimization",
"Image Color Analysis",
"Three Dimensional Displays",
"Reactive Power",
"Conferences",
"Optimization",
"Saliency",
"Stereo Images",
"Segmentation"
],
"authors": [
{
"affiliation": null,
"fullName": "Jianteng Peng",
"givenName": "Jianteng",
"surname": "Peng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jianbing Shen",
"givenName": "Jianbing",
"surname": "Shen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yunde Jia",
"givenName": "Yunde",
"surname": "Jia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xuelong Li",
"givenName": "Xuelong",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "22-28",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-3022-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3022a015",
"articleId": "12OmNzDvSh9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3022a029",
"articleId": "12OmNCeK2gP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/aipr/2010/8833/0/05759692",
"title": "Pre-attentive detection of depth saliency using stereo vision",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2010/05759692/12OmNB836JG",
"parentPublication": {
"id": "proceedings/aipr/2010/8833/0",
"title": "2010 IEEE 39th Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460704",
"title": "Stereo matching on low intensity quantization images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460704/12OmNC8MsJo",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccrd/2010/4043/0/4043a733",
"title": "The Solution of Stereo Correspondence Problem Using Block Matching Algorithm in Stereo Vision Mobile Robot",
"doi": null,
"abstractUrl": "/proceedings-article/iccrd/2010/4043a733/12OmNvDI3Ro",
"parentPublication": {
"id": "proceedings/iccrd/2010/4043/0",
"title": "Computer Research and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890321",
"title": "Learning to detect stereo saliency",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890321/12OmNvjgWv5",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1993/3560/0/00522835",
"title": "Smoothing algorithm for 3-D surface rendering from stereo images",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1993/00522835/12OmNvlxJoi",
"parentPublication": {
"id": "proceedings/ssst/1993/3560/0",
"title": "1993 (25th) Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206656",
"title": "A stereo approach that handles the matting problem via image warping",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206656/12OmNx3Zjp1",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/058P1B05",
"title": "Leveraging stereopsis for saliency analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/058P1B05/12OmNy50g5B",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2009/6325/0/05447323",
"title": "A Segment-Based Dense Stereo Matching Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2009/05447323/12OmNz2C1wg",
"parentPublication": {
"id": "proceedings/isise/2009/6325/0",
"title": "2009 Second International Symposium on Information Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/05/ttp2013051094",
"title": "Joint Depth Map and Color Consistency Estimation for Stereo Images with Different Illuminations and Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2013/05/ttp2013051094/13rRUxOdD9t",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07544591",
"title": "Stereoscopic Thumbnail Creation via Efficient Stereo Saliency Detection",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07544591/13rRUyfbwqP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1axf9vWJZZK",
"title": "2018 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"acronym": "ithings-greencom-cpscom-smartdata",
"groupId": "1800308",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "1axfscz4YoM",
"doi": "10.1109/Cybermatics_2018.2018.00141",
"title": "An Improved Automatic Thumbnail Generation Algorithm Based on Interpolation Technique",
"normalizedTitle": "An Improved Automatic Thumbnail Generation Algorithm Based on Interpolation Technique",
"abstract": "In servers for mobile devices, the generating speed for thumbnails should be fast and efficient. Previous research focused on the visual quality of thumbnails. In this paper, an interpolation-based thumbnail generation method is proposed to accelerate the generation processes. The proposed algorithm was based on area average sampling interpolation, which an improved sampling method to reduce the amount of sample data. The algorithm also compared with the gaussian blur transiting interpolation-based algorithm and box blur transiting interpolation based algorithm. They were also compared with open source projects called ImageMagick and OpenCV. The result shows that the area average interpolation-based algorithm is faster than the other two algorithms without affecting picture quality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In servers for mobile devices, the generating speed for thumbnails should be fast and efficient. Previous research focused on the visual quality of thumbnails. In this paper, an interpolation-based thumbnail generation method is proposed to accelerate the generation processes. The proposed algorithm was based on area average sampling interpolation, which an improved sampling method to reduce the amount of sample data. The algorithm also compared with the gaussian blur transiting interpolation-based algorithm and box blur transiting interpolation based algorithm. They were also compared with open source projects called ImageMagick and OpenCV. The result shows that the area average interpolation-based algorithm is faster than the other two algorithms without affecting picture quality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In servers for mobile devices, the generating speed for thumbnails should be fast and efficient. Previous research focused on the visual quality of thumbnails. In this paper, an interpolation-based thumbnail generation method is proposed to accelerate the generation processes. The proposed algorithm was based on area average sampling interpolation, which an improved sampling method to reduce the amount of sample data. The algorithm also compared with the gaussian blur transiting interpolation-based algorithm and box blur transiting interpolation based algorithm. They were also compared with open source projects called ImageMagick and OpenCV. The result shows that the area average interpolation-based algorithm is faster than the other two algorithms without affecting picture quality.",
"fno": "08726759",
"keywords": [
"Image Colour Analysis",
"Image Filtering",
"Image Sampling",
"Interpolation",
"Sampling Methods",
"Improved Automatic Thumbnail Generation Algorithm",
"Mobile Devices",
"Visual Quality",
"Interpolation Based Thumbnail Generation Method",
"Generation Processes",
"Area Average Sampling Interpolation",
"Improved Sampling Method",
"Interpolation Based Algorithm",
"Area Average Interpolation Based Algorithm",
"Gaussian Blur Transiting Interpolation Based Algorithm",
"Interpolation",
"Visualization",
"Image Edge Detection",
"Mathematical Model",
"Servers",
"Training",
"Companies",
"Thumbnails",
"Gaussian Blur",
"Box Blur",
"Image Magick",
"Area Average Interpolation"
],
"authors": [
{
"affiliation": "College of Mathematics and Informatics, South China Agricultural University, Guangzhou, 510642, China",
"fullName": "Guifan Weng",
"givenName": "Guifan",
"surname": "Weng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Mathematics and Informatics, South China Agricultural University, Guangzhou, 510642, China",
"fullName": "Chun Yang",
"givenName": "Chun",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Information Science and Technology, Jinan University, Guangzhou, 510632, China",
"fullName": "Jinyi Long",
"givenName": "Jinyi",
"surname": "Long",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Mathematics and Informatics, South China Agricultural University, Guangzhou, 510642, China",
"fullName": "Shanying Chen",
"givenName": "Shanying",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ithings-greencom-cpscom-smartdata",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "706-711",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7975-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08726709",
"articleId": "1axfqsiwQr6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08726651",
"articleId": "1axftoKUHeg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2008/2174/0/04761023",
"title": "Enhanced Resolution-Aware Fitting algorithm using interpolation operator",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761023/12OmNAle6OK",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-euc/2013/5088/0/06832165",
"title": "Improved Block Kalman Filter for Degraded Image Restoration",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-euc/2013/06832165/12OmNBRbksD",
"parentPublication": {
"id": "proceedings/hpcc-euc/2013/5088/0",
"title": "2013 IEEE International Conference on High Performance Computing and Communications (HPCC) & 2013 IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2014/6636/0/6636a308",
"title": "Improved Bilinear Interpolation Method for Image Fast Processing",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2014/6636a308/12OmNBTaww0",
"parentPublication": {
"id": "proceedings/icicta/2014/6636/0",
"title": "2014 7th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mines/2010/4258/0/4258a081",
"title": "An Edge-Adaptive Interpolation Algorithm for Super-Resolution Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/mines/2010/4258a081/12OmNBpVQdq",
"parentPublication": {
"id": "proceedings/mines/2010/4258/0",
"title": "Multimedia Information Networking and Security, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2011/4501/0/4501a071",
"title": "A New Image Interpolation Algorithm Based on Pulse-Coupled Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2011/4501a071/12OmNscOUeC",
"parentPublication": {
"id": "proceedings/iccis/2011/4501/0",
"title": "2011 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmedia/2009/3693/0/3693a033",
"title": "Advanced Bilinear Image Interpolation Based on Edge Features",
"doi": null,
"abstractUrl": "/proceedings-article/mmedia/2009/3693a033/12OmNvEQsf5",
"parentPublication": {
"id": "proceedings/mmedia/2009/3693/0",
"title": "Advances in Multimedia, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/his/2008/3326/0/3326a957",
"title": "Enhanced Edge-Weighted Image Interpolation Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/his/2008/3326a957/12OmNwbLVk1",
"parentPublication": {
"id": "proceedings/his/2008/3326/0",
"title": "Hybrid Intelligent Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a580",
"title": "Principal Components Analysis-Based Edge-Directed Image Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a580/12OmNwcl7CN",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800o4001",
"title": "FeatureFlow: Robust Video Interpolation via Structure-to-Texture Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800o4001/1m3ocAZLobu",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2020/8666/0/866600a001",
"title": "A Comparative Study Of Improved Kriging And Distance Power Inverse Surface Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2020/866600a001/1wRIyi1qzNm",
"parentPublication": {
"id": "proceedings/icicta/2020/8666/0",
"title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwErpHu",
"title": "Requirements Engineering Visualization, First International Workshop on",
"acronym": "rev",
"groupId": "1001649",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAXxX6v",
"doi": "10.1109/REV.2007.4",
"title": "On Requirements Visualization",
"normalizedTitle": "On Requirements Visualization",
"abstract": "This paper summarizes the typical objectives and process of visualization and highlights the primary areas in which visualization systems and artifacts have been used to support requirements engineering activities to date. The paper suggests that the field has yet to realize some of the benefits that can arise from a well designed and task-oriented information visualization, falling behind other areas of software engineering in which visualization has been used to better effect. By way of an exemplar, the paper proposes the need for a way to visualize the multi-dimensional nature of requirements to help bring about a shared and rapid comprehension on the health of a project's requirements, and so support various diagnostic activities and decision making tasks during software development. It examines how new ways to 'see' the requirements could be developed, based on metaphor and mapping, provides some samples, and outlines a research agenda to explore a vision related to requirements sensing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper summarizes the typical objectives and process of visualization and highlights the primary areas in which visualization systems and artifacts have been used to support requirements engineering activities to date. The paper suggests that the field has yet to realize some of the benefits that can arise from a well designed and task-oriented information visualization, falling behind other areas of software engineering in which visualization has been used to better effect. By way of an exemplar, the paper proposes the need for a way to visualize the multi-dimensional nature of requirements to help bring about a shared and rapid comprehension on the health of a project's requirements, and so support various diagnostic activities and decision making tasks during software development. It examines how new ways to 'see' the requirements could be developed, based on metaphor and mapping, provides some samples, and outlines a research agenda to explore a vision related to requirements sensing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper summarizes the typical objectives and process of visualization and highlights the primary areas in which visualization systems and artifacts have been used to support requirements engineering activities to date. The paper suggests that the field has yet to realize some of the benefits that can arise from a well designed and task-oriented information visualization, falling behind other areas of software engineering in which visualization has been used to better effect. By way of an exemplar, the paper proposes the need for a way to visualize the multi-dimensional nature of requirements to help bring about a shared and rapid comprehension on the health of a project's requirements, and so support various diagnostic activities and decision making tasks during software development. It examines how new ways to 'see' the requirements could be developed, based on metaphor and mapping, provides some samples, and outlines a research agenda to explore a vision related to requirements sensing.",
"fno": "32480011",
"keywords": [
"Formal Specification",
"Visualization Systems",
"Requirements Engineering",
"Task Oriented Information Visualization",
"Software Engineering",
"Diagnostic Activities",
"Decision Making Tasks",
"Data Visualization",
"Mathematical Model",
"Decision Making",
"Software Engineering",
"Conferences",
"Computer Science",
"Programming",
"Humans",
"Impedance",
"Documentation"
],
"authors": [
{
"affiliation": "Pace Univ., New York",
"fullName": "Orlena C.Z. Gotel",
"givenName": "Orlena C.Z.",
"surname": "Gotel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Pace Univ., New York",
"fullName": "Francis T. Marchese",
"givenName": "Francis T.",
"surname": "Marchese",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Stephen J. Morris",
"givenName": "Stephen J.",
"surname": "Morris",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "rev",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-10-01T00:00:00",
"pubType": "proceedings",
"pages": "11-11",
"year": "2007",
"issn": null,
"isbn": "0-7695-3248-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "32480001",
"articleId": "12OmNybfr6J",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/re/2013/5765/0/06636720",
"title": "Keeping requirements on track via visual analytics",
"doi": null,
"abstractUrl": "/proceedings-article/re/2013/06636720/12OmNqH9hoK",
"parentPublication": {
"id": "proceedings/re/2013/5765/0",
"title": "2013 IEEE 21st International Requirements Engineering Conference (RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/empire/2011/1075/0/06046254",
"title": "Developers want requirements, but their project manager doesn't; and a possibly transcendent Hawthorne effect",
"doi": null,
"abstractUrl": "/proceedings-article/empire/2011/06046254/12OmNqzu6Ui",
"parentPublication": {
"id": "proceedings/empire/2011/1075/0",
"title": "2011 First International Workshop on Empirical Requirements Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/re/2013/5765/0/06636762",
"title": "Visual analytics for software requirements engineering",
"doi": null,
"abstractUrl": "/proceedings-article/re/2013/06636762/12OmNrJ11yp",
"parentPublication": {
"id": "proceedings/re/2013/5765/0",
"title": "2013 IEEE 21st International Requirements Engineering Conference (RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/re/2016/4121/0/4121a006",
"title": "Requirements Engineering Visualization: A Systematic Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/re/2016/4121a006/12OmNyRxFzB",
"parentPublication": {
"id": "proceedings/re/2016/4121/0",
"title": "2016 IEEE 24th International Requirements Engineering Conference (RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rev/2009/4104/0/41040046",
"title": "Requirements Engineering Visualization: A Survey on the State-of-the-Art",
"doi": null,
"abstractUrl": "/proceedings-article/rev/2009/41040046/12OmNzIUfQV",
"parentPublication": {
"id": "proceedings/rev/2009/4104/0",
"title": "Requirements Engineering Visualization, First International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2019/03/08254303",
"title": "Requirements Quality Is Quality in Use",
"doi": null,
"abstractUrl": "/magazine/so/2019/03/08254303/13rRUyfbwoS",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/re/2020/7438/0/09218200",
"title": "Does our culture influence requirements engineering activities? Vietnamese practitioners’ view",
"doi": null,
"abstractUrl": "/proceedings-article/re/2020/09218200/1nMQulPtXgc",
"parentPublication": {
"id": "proceedings/re/2020/7438/0",
"title": "2020 IEEE 28th International Requirements Engineering Conference (RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/d4re/2020/8344/0/834400a006",
"title": "The Importance of Requirements Engineering for Teaching Large Visualization Courses",
"doi": null,
"abstractUrl": "/proceedings-article/d4re/2020/834400a006/1nWO2yS8hGM",
"parentPublication": {
"id": "proceedings/d4re/2020/8344/0",
"title": "2020 Fourth International Workshop on Learning from Other Disciplines for Requirements Engineering (D4RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a746",
"title": "Literature Review on Visualization in Supply Chain & Decision Making",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a746/1rSRaK4pgt2",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552846",
"title": "A Critical Reflection on Visualization Research: Where Do Decision Making Tasks Hide?",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552846/1xibYOLsNc4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzIl3t3",
"title": "2016 Second Workshop on In-Situ Infrastructures for Enabling Extreme-Scale Analysis and Visualization (ISAV)",
"acronym": "isav",
"groupId": "1818464",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAjO6EE",
"doi": "10.1109/ISAV.2016.014",
"title": "Visualization and Analysis Requirements for In Situ Processing for a Large-Scale Fusion Simulation Code",
"normalizedTitle": "Visualization and Analysis Requirements for In Situ Processing for a Large-Scale Fusion Simulation Code",
"abstract": "In situ techniques have become a very active research area since they have been shown to be an effective way to combat the issues associated with the ever growing gap between computation and I/O bandwidth. In order to take full advantage of in situ techniques with a large-scale simulation code, it is critical to understand the breadth and depth of its analysis requirements. In this paper, we present the results of a survey done with members of the XGC1 fusion simulation code team in order to gather their requirements for analysis and visualization. We look at these requirements from the perspective of in situ processing and present a list of XGC1 analysis tasks performed by its physicists, engineers, and visualization specialists. This analysis of the specific needs and use cases of a single code is important in understanding the nature of the needs that simulations have in terms of data movement and usage for visualization and analysis, now and in the future.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In situ techniques have become a very active research area since they have been shown to be an effective way to combat the issues associated with the ever growing gap between computation and I/O bandwidth. In order to take full advantage of in situ techniques with a large-scale simulation code, it is critical to understand the breadth and depth of its analysis requirements. In this paper, we present the results of a survey done with members of the XGC1 fusion simulation code team in order to gather their requirements for analysis and visualization. We look at these requirements from the perspective of in situ processing and present a list of XGC1 analysis tasks performed by its physicists, engineers, and visualization specialists. This analysis of the specific needs and use cases of a single code is important in understanding the nature of the needs that simulations have in terms of data movement and usage for visualization and analysis, now and in the future.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In situ techniques have become a very active research area since they have been shown to be an effective way to combat the issues associated with the ever growing gap between computation and I/O bandwidth. In order to take full advantage of in situ techniques with a large-scale simulation code, it is critical to understand the breadth and depth of its analysis requirements. In this paper, we present the results of a survey done with members of the XGC1 fusion simulation code team in order to gather their requirements for analysis and visualization. We look at these requirements from the perspective of in situ processing and present a list of XGC1 analysis tasks performed by its physicists, engineers, and visualization specialists. This analysis of the specific needs and use cases of a single code is important in understanding the nature of the needs that simulations have in terms of data movement and usage for visualization and analysis, now and in the future.",
"fno": "07836401",
"keywords": [
"Data Visualisation",
"Large Scale Fusion Simulation Code",
"I O Bandwidth",
"Large Scale Simulation Code",
"XGC 1 Fusion Simulation Code Team",
"XGC 1 Analysis Tasks",
"Visualization Specialists",
"Data Visualization",
"Computational Modeling",
"Data Models",
"Analytical Models",
"Interviews",
"Three Dimensional Displays",
"Tokamak Devices"
],
"authors": [
{
"affiliation": null,
"fullName": "James Kress",
"givenName": "James",
"surname": "Kress",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "David Pugmire",
"givenName": "David",
"surname": "Pugmire",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Scott Klasky",
"givenName": "Scott",
"surname": "Klasky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hank Childs",
"givenName": "Hank",
"surname": "Childs",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isav",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-11-01T00:00:00",
"pubType": "proceedings",
"pages": "45-50",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3872-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07836400",
"articleId": "12OmNvlPkGJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07836402",
"articleId": "12OmNAg7k1E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ldav/2011/0155/0/06092322",
"title": "The ParaView Coprocessing Library: A scalable, general purpose in situ visualization library",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2011/06092322/12OmNAGNCeu",
"parentPublication": {
"id": "proceedings/ldav/2011/0155/0",
"title": "IEEE Symposium on Large Data Analysis and Visualization (LDAV 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2014/5666/0/07004275",
"title": "In-situ visualization and computational steering for large-scale simulation of turbulent flows in complex geometries",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2014/07004275/12OmNrMHOiY",
"parentPublication": {
"id": "proceedings/big-data/2014/5666/0",
"title": "2014 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2016/02/mcg2016020005",
"title": "The Tensions of In Situ Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2016/02/mcg2016020005/13rRUILLkIJ",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/01/mcg2018010119",
"title": "Optimizing Scientist Time through In Situ Visualization and Analysis",
"doi": null,
"abstractUrl": "/magazine/cg/2018/01/mcg2018010119/13rRUwciPhU",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2016/03/mcg2016030022",
"title": "WarpIV: In Situ Visualization and Analysis of Ion Accelerator Simulations",
"doi": null,
"abstractUrl": "/magazine/cg/2016/03/mcg2016030022/13rRUxbCbsH",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2022/8106/0/810600a538",
"title": "Colza: Enabling Elastic In Situ Visualization for High-performance Computing Simulations",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2022/810600a538/1F1W9cR1VcI",
"parentPublication": {
"id": "proceedings/ipdps/2022/8106/0",
"title": "2022 IEEE International Parallel and Distributed Processing Symposium (IPDPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cluster/2022/9856/0/985600a218",
"title": "Hybrid Analysis of Fusion Data for Online Understanding of Complex Science on Extreme Scale Computers",
"doi": null,
"abstractUrl": "/proceedings-article/cluster/2022/985600a218/1HzBsvheqCA",
"parentPublication": {
"id": "proceedings/cluster/2022/9856/0",
"title": "2022 IEEE International Conference on Cluster Computing (CLUSTER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2022/6124/0/612400a182",
"title": "SIM-SITU: A Framework for the Faithful Simulation of in situ Processing",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2022/612400a182/1J6hxD8UJYA",
"parentPublication": {
"id": "proceedings/e-science/2022/6124/0",
"title": "2022 IEEE 18th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2019/2605/0/08944265",
"title": "Low-Overhead In Situ Visualization Using Halo Replay",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2019/08944265/1grOFpiaovK",
"parentPublication": {
"id": "proceedings/ldav/2019/2605/0",
"title": "2019 IEEE 9th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2021/03/09388894",
"title": "<italic>In Situ</italic> Visualization With Temporal Caching",
"doi": null,
"abstractUrl": "/magazine/cs/2021/03/09388894/1smZR2zRFyo",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxHrym1",
"title": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"acronym": "asonam",
"groupId": "1002866",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB0FxhW",
"doi": "10.1109/ASONAM.2016.7752429",
"title": "ARC: A pipeline approach enabling large-scale graph visualization",
"normalizedTitle": "ARC: A pipeline approach enabling large-scale graph visualization",
"abstract": "When working with a high volume relational database, is it possible to effectively provide a compact visualization of the tuples in that database? Data visualization techniques very often scale poorly with input volume, hindering attempts at providing a responsive, full picture of the relationships within data. We introduce a method of efficiently visualizing millions of tuples in a two-dimensional constrained space, providing a method for data to be visually analyzed at the tuple level. We achieve this by applying a physics simulation on an embedded network, positioning tuples according to their representative node.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When working with a high volume relational database, is it possible to effectively provide a compact visualization of the tuples in that database? Data visualization techniques very often scale poorly with input volume, hindering attempts at providing a responsive, full picture of the relationships within data. We introduce a method of efficiently visualizing millions of tuples in a two-dimensional constrained space, providing a method for data to be visually analyzed at the tuple level. We achieve this by applying a physics simulation on an embedded network, positioning tuples according to their representative node.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When working with a high volume relational database, is it possible to effectively provide a compact visualization of the tuples in that database? Data visualization techniques very often scale poorly with input volume, hindering attempts at providing a responsive, full picture of the relationships within data. We introduce a method of efficiently visualizing millions of tuples in a two-dimensional constrained space, providing a method for data to be visually analyzed at the tuple level. We achieve this by applying a physics simulation on an embedded network, positioning tuples according to their representative node.",
"fno": "07752429",
"keywords": [
"Data Visualization",
"Layout",
"Motion Pictures",
"Mathematical Model",
"Pipelines",
"Annealing",
"Computational Modeling"
],
"authors": [
{
"affiliation": "Faculty of Science, UOIT, Oshawa, ON, Canada",
"fullName": "Michael Ferron",
"givenName": "Michael",
"surname": "Ferron",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Science, UOIT, Oshawa, ON, Canada",
"fullName": "Ken Q. Pu",
"givenName": "Ken Q.",
"surname": "Pu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Science, UOIT, Oshawa, ON, Canada",
"fullName": "Jaroslaw Szlichta",
"givenName": "Jaroslaw",
"surname": "Szlichta",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "asonam",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-08-01T00:00:00",
"pubType": "proceedings",
"pages": "1397-1400",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-2846-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07752428",
"articleId": "12OmNwnH4P3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07752430",
"articleId": "12OmNyFU79c",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2016/2303/0/2303a211",
"title": "StoryCake: A Hierarchical Plot Visualization Method for Storytelling in Polar Coordinates",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2016/2303a211/12OmNBDQbk9",
"parentPublication": {
"id": "proceedings/cw/2016/2303/0",
"title": "2016 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2015/7568/0/7568a436",
"title": "CosMovis: Semantic Network Visualization by Using Sentiment Words of Movie Review Data",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a436/12OmNBp52wx",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2003/2030/0/20300054",
"title": "Video visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300054/12OmNqFrGtk",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/saner/2017/5501/0/07884641",
"title": "An arc-based approach for visualization of code smells",
"doi": null,
"abstractUrl": "/proceedings-article/saner/2017/07884641/12OmNwpoFIe",
"parentPublication": {
"id": "proceedings/saner/2017/5501/0",
"title": "2017 IEEE 24th International Conference on Software Analysis, Evolution and Reengineering (SANER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2015/7568/0/7568a228",
"title": "A Concurrent Architecture Proposal for Information Visualization Pipeline",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a228/12OmNxZ2Glk",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsia/2017/2198/0/08339086",
"title": "High-dimensional scientific data exploration via cinema",
"doi": null,
"abstractUrl": "/proceedings-article/dsia/2017/08339086/12OmNxymoc7",
"parentPublication": {
"id": "proceedings/dsia/2017/2198/0",
"title": "2017 IEEE Workshop on Data Systems for Interactive Analysis (DSIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc/2014/5079/0/5079a271",
"title": "Cyber-Physical Directory with Optimized Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/dasc/2014/5079a271/12OmNyKrHgS",
"parentPublication": {
"id": "proceedings/dasc/2014/5079/0",
"title": "2014 IEEE 12th International Conference on Dependable, Autonomic and Secure Computing (DASC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2017/5738/0/08031575",
"title": "NetSet: A systematic integration of visualization for analyzing set intersections with network",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2017/08031575/12OmNyOHG3r",
"parentPublication": {
"id": "proceedings/pacificvis/2017/5738/0",
"title": "2017 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1991/2245/0/00175820",
"title": "Deixis and the future of visualization excellence",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1991/00175820/12OmNzcxZji",
"parentPublication": {
"id": "proceedings/visual/1991/2245/0",
"title": "1991 Proceeding Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/01/08951775",
"title": "Enabling Domain Expertise in Scientific Visualization With CinemaScience",
"doi": null,
"abstractUrl": "/magazine/cg/2020/01/08951775/1goLbHeDSaQ",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1A9VchbY4Mw",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"acronym": "bibm",
"groupId": "1001586",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1A9VVWEHKeY",
"doi": "10.1109/BIBM52615.2021.9669719",
"title": "A Fast-Processing Pipeline for Three-dimensional Visualization of Acute Ischemic Stroke lesion topography",
"normalizedTitle": "A Fast-Processing Pipeline for Three-dimensional Visualization of Acute Ischemic Stroke lesion topography",
"abstract": "Ischemic stroke is the most common neurological disease. Previous researches have proven that ischemic lesion topography in specific brain structural regions and vascular territories are critical for outcome prediction and personalized treatment plan making. Compared to traditional two-dimensional visualization, computerized three-dimensional visualization provides more complete and interactive information regarding the ischemic lesion topography. In this paper, we propose a fast-processing pipeline for three-dimensional visualization of ischemic lesion topography. The pipeline is able to achieve in average 80.3% similarity with the traditional pipeline in terms of dice score while greatly shorten the processing time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Ischemic stroke is the most common neurological disease. Previous researches have proven that ischemic lesion topography in specific brain structural regions and vascular territories are critical for outcome prediction and personalized treatment plan making. Compared to traditional two-dimensional visualization, computerized three-dimensional visualization provides more complete and interactive information regarding the ischemic lesion topography. In this paper, we propose a fast-processing pipeline for three-dimensional visualization of ischemic lesion topography. The pipeline is able to achieve in average 80.3% similarity with the traditional pipeline in terms of dice score while greatly shorten the processing time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Ischemic stroke is the most common neurological disease. Previous researches have proven that ischemic lesion topography in specific brain structural regions and vascular territories are critical for outcome prediction and personalized treatment plan making. Compared to traditional two-dimensional visualization, computerized three-dimensional visualization provides more complete and interactive information regarding the ischemic lesion topography. In this paper, we propose a fast-processing pipeline for three-dimensional visualization of ischemic lesion topography. The pipeline is able to achieve in average 80.3% similarity with the traditional pipeline in terms of dice score while greatly shorten the processing time.",
"fno": "09669719",
"keywords": [
"Biodiffusion",
"Biomedical MRI",
"Brain",
"Diseases",
"Medical Disorders",
"Medical Image Processing",
"Neurophysiology",
"Patient Treatment",
"Ischemic Lesion Topography",
"Fast Processing Pipeline",
"Three Dimensional Visualization",
"Acute Ischemic Stroke Lesion Topography",
"Common Neurological Disease",
"Specific Brain Structural Regions",
"Two Dimensional Visualization",
"Neurological Diseases",
"Visualization",
"Brain",
"Three Dimensional Displays",
"Conferences",
"Pipelines",
"Surfaces",
"Three Dimensional Visualization Tool",
"Acute Ischemic Stroke",
"Neuroimaging",
"Topography",
"Lesion Segmentation"
],
"authors": [
{
"affiliation": "The Hong Kong Polytechnic University,Department of Health Technology and Informatics,Hong Kong,China",
"fullName": "Huiling Shao",
"givenName": "Huiling",
"surname": "Shao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Hong Kong Polytechnic University,Department of Health Technology and Informatics,Hong Kong,China",
"fullName": "Lawrence Chan",
"givenName": "Lawrence",
"surname": "Chan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The First Affiliated Hospital of Xiamen University,Department of Neurology,Xiamen,China",
"fullName": "Fiona Chen",
"givenName": "Fiona",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The First Affiliated Hospital of Xiamen University,Department of Neurology,Xiamen,China",
"fullName": "Qilin Ma",
"givenName": "Qilin",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The First Affiliated Hospital of Xiamen University,Department of Pharmacy,Xiamen,China",
"fullName": "Zhiyu Shao",
"givenName": "Zhiyu",
"surname": "Shao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Hong Kong Polytechnic University,Department of Health Technology and Informatics,Hong Kong,China",
"fullName": "Heng Du",
"givenName": "Heng",
"surname": "Du",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-12-01T00:00:00",
"pubType": "proceedings",
"pages": "3207-3214",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0126-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09669768",
"articleId": "1A9VwRA8rba",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09669330",
"articleId": "1A9VSi18Mfe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ijcnn/2009/3548/0/05178883",
"title": "Predicting final extent of ischemic infarction using an artificial neural network analysis of multiparametric MRI in patients with stroke",
"doi": null,
"abstractUrl": "/proceedings-article/ijcnn/2009/05178883/12OmNAi6vUD",
"parentPublication": {
"id": "proceedings/ijcnn/2009/3548/0",
"title": "Neural Networks, IEEE - INNS - ENNS International Joint Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2011/1612/0/06112486",
"title": "Application of Conditional Logistic Regression method in a prospective effectiveness comparative study among patients with Acute Ischemic Stroke",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2011/06112486/12OmNB9KHri",
"parentPublication": {
"id": "proceedings/bibmw/2011/1612/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/3/07295068",
"title": "Online brain tissue classification in multiple sclerosis using a scanner-integrated image analysis pipeline",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295068/12OmNqNXEsX",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2015/6799/0/07359869",
"title": "Deep learning of tissue fate features in acute ischemic stroke",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2015/07359869/12OmNyQGRW8",
"parentPublication": {
"id": "proceedings/bibm/2015/6799/0",
"title": "2015 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545571",
"title": "Fast Skin Lesion Segmentation via Fully Convolutional Network with Residual Architecture and CRF",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545571/17D45VTRopk",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2018/5488/0/08621303",
"title": "Singling out ischemic lesion zones and transplanted mesenchymal stem cells in the rat brain MRI",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2018/08621303/17D45WKWnJS",
"parentPublication": {
"id": "proceedings/bibm/2018/5488/0",
"title": "2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/1/260701a720",
"title": "The Causes Analysis of Ischemic Stroke Transformation into Hemorrhagic Stroke using PLS (partial Least Square)-GA and Swarm Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260701a720/1cYizfffQMU",
"parentPublication": {
"id": "proceedings/compsac/2019/2607/1",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2019/9138/0/08904574",
"title": "CT-To-MR Conditional Generative Adversarial Networks for Ischemic Stroke Lesion Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2019/08904574/1f8N71cs18c",
"parentPublication": {
"id": "proceedings/ichi/2019/9138/0",
"title": "2019 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2019/4617/0/461700a737",
"title": "Towards a Novel Way to Predict Deficits After a Brain Lesion: A Stroke Example",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2019/461700a737/1grPfYer8l2",
"parentPublication": {
"id": "proceedings/bibe/2019/4617/0",
"title": "2019 IEEE 19th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2019/4617/0/461700a957",
"title": "Ischemic Stroke Lesion Prediction in CT Perfusion Scans Using Multiple Parallel U-Nets Following by a Pixel-Level Classifier",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2019/461700a957/1grPlxySRtm",
"parentPublication": {
"id": "proceedings/bibe/2019/4617/0",
"title": "2019 IEEE 19th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1IT0z7XBBgA",
"title": "2022 IEEE 12th Symposium on Large Data Analysis and Visualization (LDAV)",
"acronym": "ldav",
"groupId": "9966414",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1IT0CzlpPHy",
"doi": "10.1109/LDAV57265.2022.9966395",
"title": "A Prototype for Pipeline-Composable Task-Based Visualization Algorithms",
"normalizedTitle": "A Prototype for Pipeline-Composable Task-Based Visualization Algorithms",
"abstract": "For next generation platforms, the paradigm of task-based parallelism has the potential to overcome some of the accompanying challenges. This paradigm has already been applied by the visualization community to specific algorithms and problems. However, one advantage of the task-based paradigm-the interleaving of work-should lead to better utilization of resources and ultimately lower execution times if the paradigm is applied to whole pipelines. In order to investigate this potential, we build a prototype framework for composable task-based parallel visualization algorithms. With this we explore the combination of a strictly task-based approach with the addition of a pipeline layer for visualization algorithms. This additional layer eases the composition of larger task-based parallel visualization applications without the need to explicitly define the exact connection in a task graph between algorithms. In this manner, task-based visualization algorithms can be designed towards a common interface, be easily combined, and still benefit from the advantages of the task-based paradigm across algorithm boundaries, such as latency hiding. We explore the design implications of this combination and show initial results of the scalability and the impact of task interleaving on the runtime of exemplary pipelines.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For next generation platforms, the paradigm of task-based parallelism has the potential to overcome some of the accompanying challenges. This paradigm has already been applied by the visualization community to specific algorithms and problems. However, one advantage of the task-based paradigm-the interleaving of work-should lead to better utilization of resources and ultimately lower execution times if the paradigm is applied to whole pipelines. In order to investigate this potential, we build a prototype framework for composable task-based parallel visualization algorithms. With this we explore the combination of a strictly task-based approach with the addition of a pipeline layer for visualization algorithms. This additional layer eases the composition of larger task-based parallel visualization applications without the need to explicitly define the exact connection in a task graph between algorithms. In this manner, task-based visualization algorithms can be designed towards a common interface, be easily combined, and still benefit from the advantages of the task-based paradigm across algorithm boundaries, such as latency hiding. We explore the design implications of this combination and show initial results of the scalability and the impact of task interleaving on the runtime of exemplary pipelines.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For next generation platforms, the paradigm of task-based parallelism has the potential to overcome some of the accompanying challenges. This paradigm has already been applied by the visualization community to specific algorithms and problems. However, one advantage of the task-based paradigm-the interleaving of work-should lead to better utilization of resources and ultimately lower execution times if the paradigm is applied to whole pipelines. In order to investigate this potential, we build a prototype framework for composable task-based parallel visualization algorithms. With this we explore the combination of a strictly task-based approach with the addition of a pipeline layer for visualization algorithms. This additional layer eases the composition of larger task-based parallel visualization applications without the need to explicitly define the exact connection in a task graph between algorithms. In this manner, task-based visualization algorithms can be designed towards a common interface, be easily combined, and still benefit from the advantages of the task-based paradigm across algorithm boundaries, such as latency hiding. We explore the design implications of this combination and show initial results of the scalability and the impact of task interleaving on the runtime of exemplary pipelines.",
"fno": "09966395",
"keywords": [
"Data Visualisation",
"Graph Theory",
"Parallel Processing",
"Composable Task Based Parallel Visualization Algorithms",
"Exemplary Pipelines",
"Pipeline Composable Task Based Visualization Algorithms",
"Prototype Framework",
"Strictly Task Based Approach",
"Task Graph",
"Task Interleaving",
"Task Based Parallelism",
"Visualization Community",
"Visualization",
"Runtime",
"Data Analysis",
"Scalability",
"Pipelines",
"Prototypes",
"Data Visualization",
"Task Based Parallelism",
"Pipeline Composition",
"Scientific Visualization",
"Distributed Parallel",
"HPX"
],
"authors": [
{
"affiliation": "RWTH Aachen University",
"fullName": "Marvin Petersen",
"givenName": "Marvin",
"surname": "Petersen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität Kaiserslautern",
"fullName": "Kilian Werner",
"givenName": "Kilian",
"surname": "Werner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität Kaiserslautern",
"fullName": "Andrea Schnorr",
"givenName": "Andrea",
"surname": "Schnorr",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "RWTH Aachen University",
"fullName": "Torsten Wolfgang Kuhlen",
"givenName": "Torsten Wolfgang",
"surname": "Kuhlen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität Kaiserslautern",
"fullName": "Christoph Garth",
"givenName": "Christoph",
"surname": "Garth",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ldav",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-11",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9156-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1IT0Cw1l3gI",
"name": "pldav202291560-09966395s1-paper2_1012_multimedia.zip",
"size": "34 kB",
"location": "https://www.computer.org/csdl/api/v1/extra/pldav202291560-09966395s1-paper2_1012_multimedia.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09966406",
"articleId": "1IT0CHryeNG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09966403",
"articleId": "1IT0Ck3lBg4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/asonam/2016/2846/0/07752429",
"title": "ARC: A pipeline approach enabling large-scale graph visualization",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2016/07752429/12OmNB0FxhW",
"parentPublication": {
"id": "proceedings/asonam/2016/2846/0",
"title": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410468",
"title": "Adaptive visualization pipeline decomposition and mapping onto computer networks",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410468/12OmNwkzumu",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vissoft/2007/0599/0/04290712",
"title": "A Visualization for Software Project Awareness and Evolution",
"doi": null,
"abstractUrl": "/proceedings-article/vissoft/2007/04290712/12OmNx5piV4",
"parentPublication": {
"id": "proceedings/vissoft/2007/0599/0",
"title": "2007 4th IEEE International Workshop on Visualizing Software for Understanding and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2013/0174/0/06607895",
"title": "Chemical space visualization using ViFrame",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2013/06607895/12OmNxQOjGi",
"parentPublication": {
"id": "proceedings/icis/2013/0174/0",
"title": "2013 IEEE/ACIS 12th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2015/7568/0/7568a228",
"title": "A Concurrent Architecture Proposal for Information Visualization Pipeline",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a228/12OmNxZ2Glk",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017635",
"title": "Visualization Multi-Pipeline for Communicating Biology",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017635/13rRUILtJmf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07552504",
"title": "Visualization System Requirements for Data Processing Pipeline Design and Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07552504/13rRUxd2aZ6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2016/3593/0/07982324",
"title": "Using OpenDX to Teach the Concept of Visualization Pipeline",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2016/07982324/17D45Wda7h4",
"parentPublication": {
"id": "proceedings/cse-euc/2016/3593/0",
"title": "2016 19th IEEE Intl Conference on Computational Science and Engineering (CSE), IEEE 14th Intl Conference on Embedded and Ubiquitous Computing (EUC), and 15th Intl Symposium on Distributed Computing and Applications for Business Engineering (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/03/08700321",
"title": "Demonstrational Interaction for Data Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2019/03/08700321/19xNDYbJFT2",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904456",
"title": "Measuring Effects of Spatial Visualization and Domain on Visualization Task Performance: A Comparative Study",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904456/1H1gmktPnLa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqBtiU5",
"doi": "10.1109/ICCV.2017.284",
"title": "Compositional Human Pose Regression",
"normalizedTitle": "Compositional Human Pose Regression",
"abstract": "Regression based methods are not performing as well as detection based methods for human pose estimation. A central problem is that the structural information in the pose is not well exploited in the previous regression methods. In this work, we propose a structure-aware regression approach. It adopts a reparameterized pose representation using bones instead of joints. It exploits the joint connection structure to define a compositional loss function that encodes the long range interactions in the pose. It is simple, effective, and general for both 2D and 3D pose estimation in a unified setting. Comprehensive evaluation validates the effectiveness of our approach. It significantly advances the state-of-the-art on Human3.6M [20] and is competitive with state-of-the-art results on MPII [3].",
"abstracts": [
{
"abstractType": "Regular",
"content": "Regression based methods are not performing as well as detection based methods for human pose estimation. A central problem is that the structural information in the pose is not well exploited in the previous regression methods. In this work, we propose a structure-aware regression approach. It adopts a reparameterized pose representation using bones instead of joints. It exploits the joint connection structure to define a compositional loss function that encodes the long range interactions in the pose. It is simple, effective, and general for both 2D and 3D pose estimation in a unified setting. Comprehensive evaluation validates the effectiveness of our approach. It significantly advances the state-of-the-art on Human3.6M [20] and is competitive with state-of-the-art results on MPII [3].",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Regression based methods are not performing as well as detection based methods for human pose estimation. A central problem is that the structural information in the pose is not well exploited in the previous regression methods. In this work, we propose a structure-aware regression approach. It adopts a reparameterized pose representation using bones instead of joints. It exploits the joint connection structure to define a compositional loss function that encodes the long range interactions in the pose. It is simple, effective, and general for both 2D and 3D pose estimation in a unified setting. Comprehensive evaluation validates the effectiveness of our approach. It significantly advances the state-of-the-art on Human3.6M [20] and is competitive with state-of-the-art results on MPII [3].",
"fno": "1032c621",
"keywords": [
"Bone",
"Computer Vision",
"Gaussian Processes",
"Image Motion Analysis",
"Pose Estimation",
"Regression Analysis",
"Stereo Image Processing",
"Compositional Human Pose Regression",
"Regression Based Methods",
"Detection Based Methods",
"Human Pose Estimation",
"Structural Information",
"Structure Aware Regression Approach",
"Reparameterized Pose Representation",
"Joint Connection Structure",
"Compositional Loss Function",
"3 D Pose Estimation",
"Regression Methods",
"2 D Pose Estimation",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Joints",
"Pose Estimation",
"Bones",
"Heating Systems",
"Solid Modeling"
],
"authors": [
{
"affiliation": null,
"fullName": "Xiao Sun",
"givenName": "Xiao",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jiaxiang Shang",
"givenName": "Jiaxiang",
"surname": "Shang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shuang Liang",
"givenName": "Shuang",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yichen Wei",
"givenName": "Yichen",
"surname": "Wei",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "2621-2630",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032c612",
"articleId": "12OmNvJXeE0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032c631",
"articleId": "12OmNyrIaGv",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457b216",
"title": "LCR-Net: Localization-Classification-Regression for Human Pose",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b216/12OmNAq3hFY",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457b561",
"title": "3D Human Pose Estimation from a Single Image via Distance Matrix Regression",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b561/12OmNzTH0Sa",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f137",
"title": "2D/3D Pose Estimation and Action Recognition Using Multitask Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f137/17D45VTRovM",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545226",
"title": "Occluded Joints Recovery in 3D Human Pose Estimation based on Distance Matrix",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545226/17D45VTRoxp",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/05/08611390",
"title": "LCR-Net++: Multi-Person 2D and 3D Pose Detection in Natural Images",
"doi": null,
"abstractUrl": "/journal/tp/2020/05/08611390/17D45WK5Aot",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a663",
"title": "Multimodal 3D Human Pose Estimation from a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a663/1ezRBte12gw",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093526",
"title": "DeepFuse: An IMU-Aware Network for Real-Time 3D Human Pose Estimation from Multi-View Image",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093526/1jPbhRMiuwU",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f242",
"title": "Weakly-Supervised 3D Human Pose Learning via Multi-View Images in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f242/1m3ncr9inTO",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412127",
"title": "Orthographic Projection Linear Regression for Single Image 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412127/1tmhsKEHUhq",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900n3289",
"title": "CanonPose: Self-Supervised Monocular 3D Human Pose Estimation in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900n3289/1yeKBYaUvZu",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy3iFgU",
"doi": "10.1109/ICCV.2017.51",
"title": "Towards 3D Human Pose Estimation in the Wild: A Weakly-Supervised Approach",
"normalizedTitle": "Towards 3D Human Pose Estimation in the Wild: A Weakly-Supervised Approach",
"abstract": "In this paper, we study the task of 3D human pose estimation in the wild. This task is challenging due to lack of training data, as existing datasets are either in the wild images with 2D pose or in the lab images with 3D pose. We propose a weakly-supervised transfer learning method that uses mixed 2D and 3D labels in a unified deep neutral network that presents two-stage cascaded structure. Our network augments a state-of-the-art 2D pose estimation sub-network with a 3D depth regression sub-network. Unlike previous two stage approaches that train the two sub-networks sequentially and separately, our training is end-to-end and fully exploits the correlation between the 2D pose and depth estimation sub-tasks. The deep features are better learnt through shared representations. In doing so, the 3D pose labels in controlled lab environments are transferred to in the wild images. In addition, we introduce a 3D geometric constraint to regularize the 3D pose prediction, which is effective in the absence of ground truth depth labels. Our method achieves competitive results on both 2D and 3D benchmarks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we study the task of 3D human pose estimation in the wild. This task is challenging due to lack of training data, as existing datasets are either in the wild images with 2D pose or in the lab images with 3D pose. We propose a weakly-supervised transfer learning method that uses mixed 2D and 3D labels in a unified deep neutral network that presents two-stage cascaded structure. Our network augments a state-of-the-art 2D pose estimation sub-network with a 3D depth regression sub-network. Unlike previous two stage approaches that train the two sub-networks sequentially and separately, our training is end-to-end and fully exploits the correlation between the 2D pose and depth estimation sub-tasks. The deep features are better learnt through shared representations. In doing so, the 3D pose labels in controlled lab environments are transferred to in the wild images. In addition, we introduce a 3D geometric constraint to regularize the 3D pose prediction, which is effective in the absence of ground truth depth labels. Our method achieves competitive results on both 2D and 3D benchmarks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we study the task of 3D human pose estimation in the wild. This task is challenging due to lack of training data, as existing datasets are either in the wild images with 2D pose or in the lab images with 3D pose. We propose a weakly-supervised transfer learning method that uses mixed 2D and 3D labels in a unified deep neutral network that presents two-stage cascaded structure. Our network augments a state-of-the-art 2D pose estimation sub-network with a 3D depth regression sub-network. Unlike previous two stage approaches that train the two sub-networks sequentially and separately, our training is end-to-end and fully exploits the correlation between the 2D pose and depth estimation sub-tasks. The deep features are better learnt through shared representations. In doing so, the 3D pose labels in controlled lab environments are transferred to in the wild images. In addition, we introduce a 3D geometric constraint to regularize the 3D pose prediction, which is effective in the absence of ground truth depth labels. Our method achieves competitive results on both 2D and 3D benchmarks.",
"fno": "1032a398",
"keywords": [
"Image Representation",
"Learning Artificial Intelligence",
"Neural Nets",
"Pose Estimation",
"Regression Analysis",
"Stereo Image Processing",
"Weakly Supervised Approach",
"Wild Images",
"Lab Images",
"Weakly Supervised Transfer Learning Method",
"Unified Deep Neutral Network",
"Cascaded Structure",
"3 D Depth Regression Sub Network",
"Depth Estimation",
"3 D Pose Labels",
"Controlled Lab Environments",
"3 D Geometric Constraint",
"3 D Pose Prediction",
"Ground Truth Depth Labels",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Pose Estimation",
"Heating Systems",
"Skeleton",
"Training"
],
"authors": [
{
"affiliation": null,
"fullName": "Xingyi Zhou",
"givenName": "Xingyi",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qixing Huang",
"givenName": "Qixing",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiao Sun",
"givenName": "Xiao",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiangyang Xue",
"givenName": "Xiangyang",
"surname": "Xue",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yichen Wei",
"givenName": "Yichen",
"surname": "Wei",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "398-407",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032a388",
"articleId": "12OmNyoAA5L",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032a408",
"articleId": "12OmNBubOTz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2017/2610/0/261001a506",
"title": "Monocular 3D Human Pose Estimation in the Wild Using Improved CNN Supervision",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a506/12OmNxdDFF9",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f255",
"title": "3D Human Pose Estimation in the Wild by Adversarial Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f255/17D45WHONlv",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600c923",
"title": "CameraPose: Weakly-Supervised Monocular 3D Human Pose Estimation by Leveraging In-the-wild 2D Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600c923/1L6LywSn5hC",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a405",
"title": "Multi-Person 3D Human Pose Estimation from Monocular Images",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a405/1ezRBMjoJxu",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0897",
"title": "In the Wild Human Pose Estimation Using Explicit 2D Features and Intermediate 3D Representations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0897/1gyrG4eVkti",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998337",
"title": "Weakly Supervised Adversarial Learning for 3D Human Pose Estimation from Point Clouds",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998337/1hrXgdu8Bkk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/09022017",
"title": "Generalizing Monocular 3D Human Pose Estimation in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/09022017/1i5mMluVUje",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f242",
"title": "Weakly-Supervised 3D Human Pose Learning via Multi-View Images in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f242/1m3ncr9inTO",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900n3289",
"title": "CanonPose: Self-Supervised Monocular 3D Human Pose Estimation in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900n3289/1yeKBYaUvZu",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a042",
"title": "Exemplar Fine-Tuning for 3D Human Model Fitting Towards In-the-Wild 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a042/1zWEdaIowuY",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WHONlv",
"doi": "10.1109/CVPR.2018.00551",
"title": "3D Human Pose Estimation in the Wild by Adversarial Learning",
"normalizedTitle": "3D Human Pose Estimation in the Wild by Adversarial Learning",
"abstract": "Recently, remarkable advances have been achieved in 3D human pose estimation from monocular images because of the powerful Deep Convolutional Neural Networks (DCNNs). Despite their success on large-scale datasets collected in the constrained lab environment, it is difficult to obtain the 3D pose annotations for in-the-wild images. Therefore, 3D human pose estimation in the wild is still a challenge. In this paper, we propose an adversarial learning framework, which distills the 3D human pose structures learned from the fully annotated dataset to in-the-wild images with only 2D pose annotations. Instead of defining hard-coded rules to constrain the pose estimation results, we design a novel multi-source discriminator to distinguish the predicted 3D poses from the ground-truth, which helps to enforce the pose estimator to generate anthropometrically valid poses even with images in the wild. We also observe that a carefully designed information source for the discriminator is essential to boost the performance. Thus, we design a geometric descriptor, which computes the pairwise relative locations and distances between body joints, as a new information source for the discriminator. The efficacy of our adversarial learning framework with the new geometric descriptor has been demonstrated through extensive experiments on widely used public benchmarks. Our approach significantly improves the performance compared with previous state-of-the-art approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recently, remarkable advances have been achieved in 3D human pose estimation from monocular images because of the powerful Deep Convolutional Neural Networks (DCNNs). Despite their success on large-scale datasets collected in the constrained lab environment, it is difficult to obtain the 3D pose annotations for in-the-wild images. Therefore, 3D human pose estimation in the wild is still a challenge. In this paper, we propose an adversarial learning framework, which distills the 3D human pose structures learned from the fully annotated dataset to in-the-wild images with only 2D pose annotations. Instead of defining hard-coded rules to constrain the pose estimation results, we design a novel multi-source discriminator to distinguish the predicted 3D poses from the ground-truth, which helps to enforce the pose estimator to generate anthropometrically valid poses even with images in the wild. We also observe that a carefully designed information source for the discriminator is essential to boost the performance. Thus, we design a geometric descriptor, which computes the pairwise relative locations and distances between body joints, as a new information source for the discriminator. The efficacy of our adversarial learning framework with the new geometric descriptor has been demonstrated through extensive experiments on widely used public benchmarks. Our approach significantly improves the performance compared with previous state-of-the-art approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recently, remarkable advances have been achieved in 3D human pose estimation from monocular images because of the powerful Deep Convolutional Neural Networks (DCNNs). Despite their success on large-scale datasets collected in the constrained lab environment, it is difficult to obtain the 3D pose annotations for in-the-wild images. Therefore, 3D human pose estimation in the wild is still a challenge. In this paper, we propose an adversarial learning framework, which distills the 3D human pose structures learned from the fully annotated dataset to in-the-wild images with only 2D pose annotations. Instead of defining hard-coded rules to constrain the pose estimation results, we design a novel multi-source discriminator to distinguish the predicted 3D poses from the ground-truth, which helps to enforce the pose estimator to generate anthropometrically valid poses even with images in the wild. We also observe that a carefully designed information source for the discriminator is essential to boost the performance. Thus, we design a geometric descriptor, which computes the pairwise relative locations and distances between body joints, as a new information source for the discriminator. The efficacy of our adversarial learning framework with the new geometric descriptor has been demonstrated through extensive experiments on widely used public benchmarks. Our approach significantly improves the performance compared with previous state-of-the-art approaches.",
"fno": "642000f255",
"keywords": [
"Learning Artificial Intelligence",
"Neural Nets",
"Pose Estimation",
"3 D Human Pose Estimation",
"In The Wild Images",
"Adversarial Learning Framework",
"Predicted 3 D Poses",
"Anthropometrically Valid Poses",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Pose Estimation",
"Generators",
"Heating Systems",
"Joints",
"Task Analysis"
],
"authors": [
{
"affiliation": null,
"fullName": "Wei Yang",
"givenName": "Wei",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wanli Ouyang",
"givenName": "Wanli",
"surname": "Ouyang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaolong Wang",
"givenName": "Xiaolong",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jimmy Ren",
"givenName": "Jimmy",
"surname": "Ren",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hongsheng Li",
"givenName": "Hongsheng",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaogang Wang",
"givenName": "Xiaogang",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "5255-5264",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000f245",
"articleId": "17D45WKWnHZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000f265",
"articleId": "17D45WODaoU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2017/2610/0/261001a506",
"title": "Monocular 3D Human Pose Estimation in the Wild Using Improved CNN Supervision",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a506/12OmNxdDFF9",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a398",
"title": "Towards 3D Human Pose Estimation in the Wild: A Weakly-Supervised Approach",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a398/12OmNy3iFgU",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600n3147",
"title": "Estimating Egocentric 3D Human Pose in the Wild with External Weak Supervision",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3147/1H1ms5RlwuQ",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a405",
"title": "Multi-Person 3D Human Pose Estimation from Monocular Images",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a405/1ezRBMjoJxu",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0897",
"title": "In the Wild Human Pose Estimation Using Explicit 2D Features and Intermediate 3D Representations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0897/1gyrG4eVkti",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/09022017",
"title": "Generalizing Monocular 3D Human Pose Estimation in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/09022017/1i5mMluVUje",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f242",
"title": "Weakly-Supervised 3D Human Pose Learning via Multi-View Images in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f242/1m3ncr9inTO",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2019/4752/0/09212996",
"title": "3D Human Pose Estimation with Adversarial Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2019/09212996/1nHRTVvYYVi",
"parentPublication": {
"id": "proceedings/icvrv/2019/4752/0",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a301",
"title": "SMPLy Benchmarking 3D Human Pose Estimation in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a301/1qyxj4t4Z2w",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a042",
"title": "Exemplar Fine-Tuning for 3D Human Model Fitting Towards In-the-Wild 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a042/1zWEdaIowuY",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1hQqfuoOyHu",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hQqygVk4TK",
"doi": "10.1109/ICCV.2019.00243",
"title": "HEMlets Pose: Learning Part-Centric Heatmap Triplets for Accurate 3D Human Pose Estimation",
"normalizedTitle": "HEMlets Pose: Learning Part-Centric Heatmap Triplets for Accurate 3D Human Pose Estimation",
"abstract": "Estimating 3D human pose from a single image is a challenging task. This work attempts to address the uncertainty of lifting the detected 2D joints to the 3D space by introducing an intermediate state - Part-Centric Heatmap Triplets (HEMlets), which shortens the gap between the 2D observation and the 3D interpretation. The HEMlets utilize three joint-heatmaps to represent the relative depth information of the end-joints for each skeletal body part. In our approach, a Convolutional Network(ConvNet) is first trained to predict HEMlests from the input image, followed by a volumetric joint-heatmap regression. We leverage on the integral operation to extract the joint locations from the volumetric heatmaps, guaranteeing end-to-end learning. Despite the simplicity of the network design, the quantitative comparisons show a significant performance improvement over the best-of-grade method (by 20% on Human3.6M). The proposed method naturally supports training with \"in-the-wild'' images, where only weakly-annotated relative depth information of skeletal joints is available. This further improves the generalization ability of our model, as validated by qualitative comparisons on outdoor images.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Estimating 3D human pose from a single image is a challenging task. This work attempts to address the uncertainty of lifting the detected 2D joints to the 3D space by introducing an intermediate state - Part-Centric Heatmap Triplets (HEMlets), which shortens the gap between the 2D observation and the 3D interpretation. The HEMlets utilize three joint-heatmaps to represent the relative depth information of the end-joints for each skeletal body part. In our approach, a Convolutional Network(ConvNet) is first trained to predict HEMlests from the input image, followed by a volumetric joint-heatmap regression. We leverage on the integral operation to extract the joint locations from the volumetric heatmaps, guaranteeing end-to-end learning. Despite the simplicity of the network design, the quantitative comparisons show a significant performance improvement over the best-of-grade method (by 20% on Human3.6M). The proposed method naturally supports training with \"in-the-wild'' images, where only weakly-annotated relative depth information of skeletal joints is available. This further improves the generalization ability of our model, as validated by qualitative comparisons on outdoor images.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Estimating 3D human pose from a single image is a challenging task. This work attempts to address the uncertainty of lifting the detected 2D joints to the 3D space by introducing an intermediate state - Part-Centric Heatmap Triplets (HEMlets), which shortens the gap between the 2D observation and the 3D interpretation. The HEMlets utilize three joint-heatmaps to represent the relative depth information of the end-joints for each skeletal body part. In our approach, a Convolutional Network(ConvNet) is first trained to predict HEMlests from the input image, followed by a volumetric joint-heatmap regression. We leverage on the integral operation to extract the joint locations from the volumetric heatmaps, guaranteeing end-to-end learning. Despite the simplicity of the network design, the quantitative comparisons show a significant performance improvement over the best-of-grade method (by 20% on Human3.6M). The proposed method naturally supports training with \"in-the-wild'' images, where only weakly-annotated relative depth information of skeletal joints is available. This further improves the generalization ability of our model, as validated by qualitative comparisons on outdoor images.",
"fno": "480300c344",
"keywords": [
"Convolutional Neural Nets",
"Image Representation",
"Learning Artificial Intelligence",
"Pose Estimation",
"HE Mlets Pose",
"Accurate 3 D Human Pose Estimation",
"Single Image",
"Detected 2 D Joints",
"Relative Depth Information",
"End Joints",
"Skeletal Body Part",
"Input Image",
"Volumetric Joint Heatmap Regression",
"Joint Locations",
"End To End Learning",
"Network Design",
"Skeletal Joints",
"Outdoor Images",
"Convolutional Network",
"Part Centric Heatmap Triplets",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Pose Estimation",
"Task Analysis",
"Space Heating",
"Training"
],
"authors": [
{
"affiliation": "Cloudream Technology Co.. Ltd",
"fullName": "Kun Zhou",
"givenName": "Kun",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenzhen Research Institute of Big Data. the Chinese University of Hong Kong. Shenzhen",
"fullName": "Xiaoguang Han",
"givenName": "Xiaoguang",
"surname": "Han",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cloudream Technology Co.. Ltd",
"fullName": "Nianjuan Jiang",
"givenName": "Nianjuan",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "South China University of Technology",
"fullName": "Kui Jia",
"givenName": "Kui",
"surname": "Jia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shenzhen Cloudream Technology Co.. Ltd",
"fullName": "Jiangbo Lu",
"givenName": "Jiangbo",
"surname": "Lu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "2344-2353",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4803-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "480300c335",
"articleId": "1hVlrNTxTeU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "480300c354",
"articleId": "1hVlILCLZPG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2018/6420/0/642000b159",
"title": "Recognizing Human Actions as the Evolution of Pose Estimation Maps",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b159/17D45XERmmd",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671770",
"title": "MH Pose: 3D Human Pose Estimation based on High-quality Heatmap",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671770/1A8jad9bNvy",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2023/4544/0/10042691",
"title": "Human Pose Estimation with Shape Aware Loss",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2023/10042691/1KOv0xIemZO",
"parentPublication": {
"id": "proceedings/fg/2023/4544/0",
"title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10093110",
"title": "Bias-Compensated Integral Regression for Human Pose Estimation",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10093110/1M61QMyYhNu",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a677",
"title": "Metric-Scale Truncation-Robust Heatmaps for 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a677/1kecIQrqjdK",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h202",
"title": "Compressed Volumetric Heatmaps for Multi-Person 3D Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h202/1m3nw0MH0o8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/06/09320561",
"title": "HEMlets PoSh: Learning Part-Centric Heatmap Triplets for 3D Human Pose and Shape Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2022/06/09320561/1qkwANyEXq8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/11/09511827",
"title": "Heatmap Regression via Randomized Rounding",
"doi": null,
"abstractUrl": "/journal/tp/2022/11/09511827/1vYRGdzUT5u",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a377",
"title": "Respective Volumetric Heatmap Autoencoder for Multi-Person 3D Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a377/1xPsokwn38Y",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900n3259",
"title": "Rethinking the Heatmap Regression for Bottom-up Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900n3259/1yeLuW3yZ0I",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3nJvdaX2U",
"doi": "10.1109/CVPR42600.2020.00227",
"title": "Fusing Wearable IMUs With Multi-View Images for Human Pose Estimation: A Geometric Approach",
"normalizedTitle": "Fusing Wearable IMUs With Multi-View Images for Human Pose Estimation: A Geometric Approach",
"abstract": "We propose to estimate 3D human pose from multi-view images and a few IMUs attached at person's limbs. It operates by firstly detecting 2D poses from the two signals, and then lifting them to the 3D space. We present a geometric approach to reinforce the visual features of each pair of joints based on the IMUs. This notably improves 2D pose estimation accuracy especially when one joint is occluded. We call this approach Orientation Regularized Network (ORN). Then we lift the multi-view 2D poses to the 3D space by an Orientation Regularized Pictorial Structure Model (ORPSM) which jointly minimizes the projection error between the 3D and 2D poses, along with the discrepancy between the 3D pose and IMU orientations. The simple two-step approach reduces the error of the state-of-the-art by a large margin on a public dataset. Our code will be released at https://github.com/microsoft/imu-human-pose-estimation-pytorch.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose to estimate 3D human pose from multi-view images and a few IMUs attached at person's limbs. It operates by firstly detecting 2D poses from the two signals, and then lifting them to the 3D space. We present a geometric approach to reinforce the visual features of each pair of joints based on the IMUs. This notably improves 2D pose estimation accuracy especially when one joint is occluded. We call this approach Orientation Regularized Network (ORN). Then we lift the multi-view 2D poses to the 3D space by an Orientation Regularized Pictorial Structure Model (ORPSM) which jointly minimizes the projection error between the 3D and 2D poses, along with the discrepancy between the 3D pose and IMU orientations. The simple two-step approach reduces the error of the state-of-the-art by a large margin on a public dataset. Our code will be released at https://github.com/microsoft/imu-human-pose-estimation-pytorch.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose to estimate 3D human pose from multi-view images and a few IMUs attached at person's limbs. It operates by firstly detecting 2D poses from the two signals, and then lifting them to the 3D space. We present a geometric approach to reinforce the visual features of each pair of joints based on the IMUs. This notably improves 2D pose estimation accuracy especially when one joint is occluded. We call this approach Orientation Regularized Network (ORN). Then we lift the multi-view 2D poses to the 3D space by an Orientation Regularized Pictorial Structure Model (ORPSM) which jointly minimizes the projection error between the 3D and 2D poses, along with the discrepancy between the 3D pose and IMU orientations. The simple two-step approach reduces the error of the state-of-the-art by a large margin on a public dataset. Our code will be released at https://github.com/microsoft/imu-human-pose-estimation-pytorch.",
"fno": "716800c197",
"keywords": [
"Image Fusion",
"Pose Estimation",
"Stereo Image Processing",
"Wearable IMU",
"Multiview Images",
"Human Pose Estimation",
"Geometric Approach",
"Visual Features",
"Estimation Accuracy",
"Multiview 2 D Poses",
"Orientation Regularized Pictorial Structure Model",
"Two Step Approach",
"Orientation Regularized Network",
"ORPSM",
"3 D Space",
"ORN",
"3 D Human Pose",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Pose Estimation",
"Solid Modeling",
"Heating Systems",
"Cameras",
"Training"
],
"authors": [
{
"affiliation": "Southeast University, Nanjing, China",
"fullName": "Zhe Zhang",
"givenName": "Zhe",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Asia, Beijing, China",
"fullName": "Chunyu Wang",
"givenName": "Chunyu",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Southeast University, Nanjing, China",
"fullName": "Wenhu Qin",
"givenName": "Wenhu",
"surname": "Qin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Asia, Beijing, China",
"fullName": "Wenjun Zeng",
"givenName": "Wenjun",
"surname": "Zeng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2197-2206",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800c186",
"articleId": "1m3noJ4DSSI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800c207",
"articleId": "1m3nEhuNuZa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457f759",
"title": "3D Human Pose Estimation = 2D Pose Estimation + Matching",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f759/12OmNAKcNOh",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a805",
"title": "Generating Multiple Diverse Hypotheses for Human 3D Pose Consistent with 2D Joint Detections",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a805/12OmNxFsmrY",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/05/08341817",
"title": "Robust 3D Human Pose Estimation from Single Images or Video Sequences",
"doi": null,
"abstractUrl": "/journal/tp/2019/05/08341817/13rRUynZ5po",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/05/08611390",
"title": "LCR-Net++: Multi-Person 2D and 3D Pose Detection in Natural Images",
"doi": null,
"abstractUrl": "/journal/tp/2020/05/08611390/17D45WK5Aot",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/05/08611195",
"title": "3D Human Pose Machines with Self-Supervised Learning",
"doi": null,
"abstractUrl": "/journal/tp/2020/05/08611195/17D45Wuc3bt",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545631",
"title": "3D Human Pose Estimation from Deep Multi-View 2D Pose",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545631/17D45WwsQ7m",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b004",
"title": "Improving 3D Human Pose Estimation Via 3D Part Affinity Fields",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b004/18j8Jeil53W",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c222",
"title": "GP2C: Geometric Projection Parameter Consensus for Joint 3D Pose and Focal Length Estimation in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c222/1hQquqjcsDe",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300e341",
"title": "Cross View Fusion for 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300e341/1hVlLRoem40",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d276",
"title": "Cross-View Tracking for Multi-Human 3D Pose Estimation at Over 100 FPS",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d276/1m3o6SeRAek",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3nlxOySyY",
"doi": "10.1109/CVPR42600.2020.00689",
"title": "PandaNet: Anchor-Based Single-Shot Multi-Person 3D Pose Estimation",
"normalizedTitle": "PandaNet: Anchor-Based Single-Shot Multi-Person 3D Pose Estimation",
"abstract": "Recently, several deep learning models have been proposed for 3D human pose estimation. Nevertheless, most of these approaches only focus on the single-person case or estimate 3D pose of a few people at high resolution. Furthermore, many applications such as autonomous driving or crowd analysis require pose estimation of a large number of people possibly at low-resolution. In this work, we present PandaNet (Pose estimAtioN and Dectection Anchor-based Network), a new single-shot, anchor-based and multi-person 3D pose estimation approach. The proposed model performs bounding box detection and, for each detected person, 2D and 3D pose regression into a single forward pass. It does not need any post-processing to regroup joints since the network predicts a full 3D pose for each bounding box and allows the pose estimation of a possibly large number of people at low resolution. To manage people overlapping, we introduce a Pose-Aware Anchor Selection strategy. Moreover, as imbalance exists between different people sizes in the image, and joints coordinates have different uncertainties depending on these sizes, we propose a method to automatically optimize weights associated to different people scales and joints for efficient training. PandaNet surpasses previous single-shot methods on several challenging datasets: a multi-person urban virtual but very realistic dataset (JTA Dataset), and two real world 3D multi-person datasets (CMU Panoptic and MuPoTS-3D).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recently, several deep learning models have been proposed for 3D human pose estimation. Nevertheless, most of these approaches only focus on the single-person case or estimate 3D pose of a few people at high resolution. Furthermore, many applications such as autonomous driving or crowd analysis require pose estimation of a large number of people possibly at low-resolution. In this work, we present PandaNet (Pose estimAtioN and Dectection Anchor-based Network), a new single-shot, anchor-based and multi-person 3D pose estimation approach. The proposed model performs bounding box detection and, for each detected person, 2D and 3D pose regression into a single forward pass. It does not need any post-processing to regroup joints since the network predicts a full 3D pose for each bounding box and allows the pose estimation of a possibly large number of people at low resolution. To manage people overlapping, we introduce a Pose-Aware Anchor Selection strategy. Moreover, as imbalance exists between different people sizes in the image, and joints coordinates have different uncertainties depending on these sizes, we propose a method to automatically optimize weights associated to different people scales and joints for efficient training. PandaNet surpasses previous single-shot methods on several challenging datasets: a multi-person urban virtual but very realistic dataset (JTA Dataset), and two real world 3D multi-person datasets (CMU Panoptic and MuPoTS-3D).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recently, several deep learning models have been proposed for 3D human pose estimation. Nevertheless, most of these approaches only focus on the single-person case or estimate 3D pose of a few people at high resolution. Furthermore, many applications such as autonomous driving or crowd analysis require pose estimation of a large number of people possibly at low-resolution. In this work, we present PandaNet (Pose estimAtioN and Dectection Anchor-based Network), a new single-shot, anchor-based and multi-person 3D pose estimation approach. The proposed model performs bounding box detection and, for each detected person, 2D and 3D pose regression into a single forward pass. It does not need any post-processing to regroup joints since the network predicts a full 3D pose for each bounding box and allows the pose estimation of a possibly large number of people at low resolution. To manage people overlapping, we introduce a Pose-Aware Anchor Selection strategy. Moreover, as imbalance exists between different people sizes in the image, and joints coordinates have different uncertainties depending on these sizes, we propose a method to automatically optimize weights associated to different people scales and joints for efficient training. PandaNet surpasses previous single-shot methods on several challenging datasets: a multi-person urban virtual but very realistic dataset (JTA Dataset), and two real world 3D multi-person datasets (CMU Panoptic and MuPoTS-3D).",
"fno": "716800g855",
"keywords": [
"Feature Extraction",
"Image Motion Analysis",
"Image Resolution",
"Learning Artificial Intelligence",
"Object Detection",
"Pose Estimation",
"Regression Analysis",
"Single Person Case",
"Pose Aware Anchor Selection Strategy",
"Deep Learning Models",
"3 D Human Pose Estimation",
"Panda Net",
"Pose Estimation And Dectection Anchor Based Network",
"3 D Pose Regression",
"2 D Pose Regression",
"Anchor Based Single Shot Multiperson 3 D Pose Estimation Method",
"Single Forward Pass",
"Single Shot Methods",
"Bounding Box Recognition",
"Two Real World 3 D Multiperson Datasets",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Pose Estimation",
"Heating Systems",
"Image Resolution",
"Solid Modeling",
"Skeleton"
],
"authors": [
{
"affiliation": "CEA LIST Vision and Learning Lab for Scene Analysis; Sorbonne University, CNRS, Institute for Intelligent Systems and Robotics",
"fullName": "Abdallah Benzine",
"givenName": "Abdallah",
"surname": "Benzine",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CEA LIST Vision and Learning Lab for Scene Analysis",
"fullName": "Florian Chabot",
"givenName": "Florian",
"surname": "Chabot",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CEA LIST Vision and Learning Lab for Scene Analysis",
"fullName": "Bertrand Luvison",
"givenName": "Bertrand",
"surname": "Luvison",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CEA LIST Vision and Learning Lab for Scene Analysis",
"fullName": "Quoc Cuong Pham",
"givenName": "Quoc Cuong",
"surname": "Pham",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sorbonne University, CNRS, Institute for Intelligent Systems and Robotics",
"fullName": "Catherine Achard",
"givenName": "Catherine",
"surname": "Achard",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "6855-6864",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800g845",
"articleId": "1m3nc7uIquQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800g865",
"articleId": "1m3o1OqK5Ec",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tp/2020/05/08611390",
"title": "LCR-Net++: Multi-Person 2D and 3D Pose Detection in Natural Images",
"doi": null,
"abstractUrl": "/journal/tp/2020/05/08611390/17D45WK5Aot",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a120",
"title": "Single-Shot Multi-person 3D Pose Estimation from Monocular RGB",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a120/17D45WaTken",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/02/09763389",
"title": "Dual Networks Based 3D Multi-Person Pose Estimation From Monocular Video",
"doi": null,
"abstractUrl": "/journal/tp/2023/02/09763389/1CT4SB4K316",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a405",
"title": "Multi-Person 3D Human Pose Estimation from Monocular Images",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a405/1ezRBMjoJxu",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0132",
"title": "Camera Distance-Aware Top-Down Approach for 3D Multi-Person Pose Estimation From a Single RGB Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0132/1hQqkcNVkc0",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300a793",
"title": "A2J: Anchor-to-Joint Regression Network for 3D Articulated Pose Estimation From a Single Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300a793/1hQqs36tVoQ",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300g950",
"title": "Single-Stage Multi-Person Pose Machines",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300g950/1hQqs9ApVAc",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600c487",
"title": "Multi-Person 3D Pose Estimation and Tracking in Sports",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600c487/1iTvjhqPdni",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h202",
"title": "Compressed Volumetric Heatmaps for Multi-Person 3D Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h202/1m3nw0MH0o8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a420",
"title": "A practical framework of multi-person 3D human pose estimation with a single RGB camera",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a420/1tnXYy4IAGA",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1nHRQncZfOM",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1nHRTVvYYVi",
"doi": "10.1109/ICVRV47840.2019.00024",
"title": "3D Human Pose Estimation with Adversarial Learning",
"normalizedTitle": "3D Human Pose Estimation with Adversarial Learning",
"abstract": "In recent years, due to the deep learning and the development of computer vision, great progress has been made in the 3D human pose estimation from RGB images. However, due to the lack of depth information in RGB images, this task still faces great challenges. In this paper, we propose a method of adversarial learning to estimate the 3D pose of the human body. Our framework consists of two parts, a pose generator and a discriminator. Using the 3D pose descriptor, we designed for adversarial learning can effectively increase the accuracy and visual effect of 3D pose estimation results. We performed ablation experiments on the public dataset, which is a good improvement compared to our baseline.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent years, due to the deep learning and the development of computer vision, great progress has been made in the 3D human pose estimation from RGB images. However, due to the lack of depth information in RGB images, this task still faces great challenges. In this paper, we propose a method of adversarial learning to estimate the 3D pose of the human body. Our framework consists of two parts, a pose generator and a discriminator. Using the 3D pose descriptor, we designed for adversarial learning can effectively increase the accuracy and visual effect of 3D pose estimation results. We performed ablation experiments on the public dataset, which is a good improvement compared to our baseline.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent years, due to the deep learning and the development of computer vision, great progress has been made in the 3D human pose estimation from RGB images. However, due to the lack of depth information in RGB images, this task still faces great challenges. In this paper, we propose a method of adversarial learning to estimate the 3D pose of the human body. Our framework consists of two parts, a pose generator and a discriminator. Using the 3D pose descriptor, we designed for adversarial learning can effectively increase the accuracy and visual effect of 3D pose estimation results. We performed ablation experiments on the public dataset, which is a good improvement compared to our baseline.",
"fno": "09212996",
"keywords": [
"Computer Vision",
"Image Colour Analysis",
"Learning Artificial Intelligence",
"Pose Estimation",
"Stereo Image Processing",
"3 D Human Pose Estimation",
"Adversarial Learning",
"Deep Learning",
"RGB Images",
"Human Body",
"3 D Pose Descriptor",
"Computer Vision",
"Pose Generator",
"Pose Discriminator",
"Visual Effect",
"Ablation Experiments",
"Public Dataset",
"Three Dimensional Displays",
"Generators",
"Pose Estimation",
"Two Dimensional Displays",
"Joints",
"Heating Systems",
"Training",
"Computer Vision",
"Image Process",
"3 D Human Pose Estimation"
],
"authors": [
{
"affiliation": "Beihang University, Qingdao, China",
"fullName": "Wenming Meng",
"givenName": "Wenming",
"surname": "Meng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University, Qingdao, China",
"fullName": "Tao Hu",
"givenName": "Tao",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University, Beijing , China",
"fullName": "Shuai Li",
"givenName": "Shuai",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-11-01T00:00:00",
"pubType": "proceedings",
"pages": "93-99",
"year": "2019",
"issn": "2375-141X",
"isbn": "978-1-7281-4752-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09212869",
"articleId": "1nHRV6vJoNW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09212946",
"articleId": "1nHRSx20hQA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457f759",
"title": "3D Human Pose Estimation = 2D Pose Estimation + Matching",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f759/12OmNAKcNOh",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f255",
"title": "3D Human Pose Estimation in the Wild by Adversarial Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f255/17D45WHONlv",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545631",
"title": "3D Human Pose Estimation from Deep Multi-View 2D Pose",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545631/17D45WwsQ7m",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b477",
"title": "3D Human Pose Estimation With 2D Marginal Heatmaps",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b477/18j8NpOOKn6",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998337",
"title": "Weakly Supervised Adversarial Learning for 3D Human Pose Estimation from Point Clouds",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998337/1hrXgdu8Bkk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c825",
"title": "Explicit Spatiotemporal Joint Relation Learning for Tracking Human Pose",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c825/1i5mpre7N72",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093316",
"title": "3D Hand Pose Estimation with Disentangled Cross-Modal Latent Space",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093316/1jPbFBfZZAI",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093595",
"title": "Lightweight 3D Human Pose Estimation Network Training Using Teacher-Student Learning",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093595/1jPbufNauvC",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412126",
"title": "PEAN: 3D Hand Pose Estimation Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412126/1tmizxbU8Cs",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413204",
"title": "On the Robustness of 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413204/1tmjFsmvUPu",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1qyxi3OgORy",
"title": "2020 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qyxlY5L8jK",
"doi": "10.1109/3DV50981.2020.00100",
"title": "Error Bounds of Projection Models in Weakly Supervised 3D Human Pose Estimation",
"normalizedTitle": "Error Bounds of Projection Models in Weakly Supervised 3D Human Pose Estimation",
"abstract": "The current state-of-the-art in monocular 3D human pose estimation is heavily influenced by weakly supervised methods. These allow 2D labels to be used to learn effective 3D human pose recovery either directly from images or via 2D-to-3D pose uplifting. In this paper we present a detailed analysis of the most commonly used simplified projection models, which relate the estimated 3D pose representation to 2D labels: normalized perspective and weak perspective projections. Specifically, we derive theoretical lower bound errors for those projection models under the commonly used mean per-joint position error (MPJPE). Additionally, we show how the normalized perspective projection can be replaced to avoid this guaranteed minimal error. We evaluate the derived lower bounds on the most commonly used 3D human pose estimation benchmark datasets. Our results show that both projection models lead to an inherent minimal error between 19.3mm and 54.7mm, even after alignment in position and scale. This is a considerable share when comparing with recent state-of-the-art results. Our paper thus establishes a theoretical baseline that shows the importance of suitable projection models in weakly supervised 3D human pose estimation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The current state-of-the-art in monocular 3D human pose estimation is heavily influenced by weakly supervised methods. These allow 2D labels to be used to learn effective 3D human pose recovery either directly from images or via 2D-to-3D pose uplifting. In this paper we present a detailed analysis of the most commonly used simplified projection models, which relate the estimated 3D pose representation to 2D labels: normalized perspective and weak perspective projections. Specifically, we derive theoretical lower bound errors for those projection models under the commonly used mean per-joint position error (MPJPE). Additionally, we show how the normalized perspective projection can be replaced to avoid this guaranteed minimal error. We evaluate the derived lower bounds on the most commonly used 3D human pose estimation benchmark datasets. Our results show that both projection models lead to an inherent minimal error between 19.3mm and 54.7mm, even after alignment in position and scale. This is a considerable share when comparing with recent state-of-the-art results. Our paper thus establishes a theoretical baseline that shows the importance of suitable projection models in weakly supervised 3D human pose estimation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The current state-of-the-art in monocular 3D human pose estimation is heavily influenced by weakly supervised methods. These allow 2D labels to be used to learn effective 3D human pose recovery either directly from images or via 2D-to-3D pose uplifting. In this paper we present a detailed analysis of the most commonly used simplified projection models, which relate the estimated 3D pose representation to 2D labels: normalized perspective and weak perspective projections. Specifically, we derive theoretical lower bound errors for those projection models under the commonly used mean per-joint position error (MPJPE). Additionally, we show how the normalized perspective projection can be replaced to avoid this guaranteed minimal error. We evaluate the derived lower bounds on the most commonly used 3D human pose estimation benchmark datasets. Our results show that both projection models lead to an inherent minimal error between 19.3mm and 54.7mm, even after alignment in position and scale. This is a considerable share when comparing with recent state-of-the-art results. Our paper thus establishes a theoretical baseline that shows the importance of suitable projection models in weakly supervised 3D human pose estimation.",
"fno": "812800a898",
"keywords": [
"Pose Estimation",
"Solid Modelling",
"Supervised Learning",
"Weakly Supervised Methods",
"Weak Perspective Projections",
"Theoretical Lower Bound Errors",
"Mean Per Joint Position Error",
"Normalized Perspective Projection",
"Guaranteed Minimal Error",
"Inherent Minimal Error",
"Projection Models",
"3 D Human Pose Estimation Benchmark Datasets",
"3 D Human Pose Recovery",
"Monocular 3 D Human Pose Estimation",
"Weakly Supervised 3 D Human Pose Estimation",
"Normalized Perspective Projections",
"2 D Labels",
"2 D To 3 D Pose Uplifting",
"3 D Pose Representation",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Solid Modeling",
"Image Reconstruction",
"Cameras",
"Pose Estimation",
"Task Analysis",
"3 D Human Pose Estimation",
"Projection Models"
],
"authors": [
{
"affiliation": "University of Augsburg",
"fullName": "Nikolas Klug",
"givenName": "Nikolas",
"surname": "Klug",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Augsburg",
"fullName": "Moritz Einfalt",
"givenName": "Moritz",
"surname": "Einfalt",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Augsburg",
"fullName": "Stephan Brehm",
"givenName": "Stephan",
"surname": "Brehm",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Augsburg",
"fullName": "Rainer Lienhart",
"givenName": "Rainer",
"surname": "Lienhart",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "898-907",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-8128-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "812800a889",
"articleId": "1qyxoMsRnoY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "812800a908",
"articleId": "1qyxngcCckE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457f759",
"title": "3D Human Pose Estimation = 2D Pose Estimation + Matching",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f759/12OmNAKcNOh",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a398",
"title": "Towards 3D Human Pose Estimation in the Wild: A Weakly-Supervised Approach",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a398/12OmNy3iFgU",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/05/08611195",
"title": "3D Human Pose Machines with Self-Supervised Learning",
"doi": null,
"abstractUrl": "/journal/tp/2020/05/08611195/17D45Wuc3bt",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a405",
"title": "Multi-Person 3D Human Pose Estimation from Monocular Images",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a405/1ezRBMjoJxu",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c222",
"title": "GP2C: Geometric Projection Parameter Consensus for Joint 3D Pose and Focal Length Estimation in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c222/1hQquqjcsDe",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300a743",
"title": "Distill Knowledge From NRSfM for Weakly Supervised 3D Pose Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300a743/1hQqw3JXtoA",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998337",
"title": "Weakly Supervised Adversarial Learning for 3D Human Pose Estimation from Point Clouds",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998337/1hrXgdu8Bkk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c197",
"title": "Fusing Wearable IMUs With Multi-View Images for Human Pose Estimation: A Geometric Approach",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c197/1m3nJvdaX2U",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800g906",
"title": "Multiview-Consistent Semi-Supervised Learning for 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800g906/1m3osbiRJSw",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412127",
"title": "Orthographic Projection Linear Regression for Single Image 3D Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412127/1tmhsKEHUhq",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCaLEnE",
"title": "2014 International Conference on Computer & Communication Engineering (ICCCE)",
"acronym": "iccce",
"groupId": "1002119",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNA0MZ04",
"doi": "10.1109/ICCCE.2014.16",
"title": "Template Matching Techniques for Iris Recognition System",
"normalizedTitle": "Template Matching Techniques for Iris Recognition System",
"abstract": "Security and authentication is one of the major parts of our daily life. Iris is one of the most reliable forms of identification object in the human body. To develop an iris authentication algorithm for personal identification, this paper describe techniques for matching two bitwise biometric template. The experimental result shows that the developed algorithm has good performance to check whether two templates are generated from same iris or not. The algorithm performs perfect recognition on a set of 94 eye images courtesy of The Chinese Academy of Sciences -- Institute of Automation (CASIA). Algorithm showed 0% false accepted and 4% false rejection rate.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Security and authentication is one of the major parts of our daily life. Iris is one of the most reliable forms of identification object in the human body. To develop an iris authentication algorithm for personal identification, this paper describe techniques for matching two bitwise biometric template. The experimental result shows that the developed algorithm has good performance to check whether two templates are generated from same iris or not. The algorithm performs perfect recognition on a set of 94 eye images courtesy of The Chinese Academy of Sciences -- Institute of Automation (CASIA). Algorithm showed 0% false accepted and 4% false rejection rate.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Security and authentication is one of the major parts of our daily life. Iris is one of the most reliable forms of identification object in the human body. To develop an iris authentication algorithm for personal identification, this paper describe techniques for matching two bitwise biometric template. The experimental result shows that the developed algorithm has good performance to check whether two templates are generated from same iris or not. The algorithm performs perfect recognition on a set of 94 eye images courtesy of The Chinese Academy of Sciences -- Institute of Automation (CASIA). Algorithm showed 0% false accepted and 4% false rejection rate.",
"fno": "7635a009",
"keywords": [
"Iris Recognition",
"Histograms",
"Computers",
"Abstracts",
"Biomedical Imaging",
"Image Processing",
"Template Matching",
"Iris",
"Iris Recognition",
"Biometric Identification",
"Image Processing"
],
"authors": [
{
"affiliation": null,
"fullName": "Umme T. Tania",
"givenName": "Umme T.",
"surname": "Tania",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sheikh M.A. Motakabber",
"givenName": "Sheikh M.A.",
"surname": "Motakabber",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Muhammad I. Ibrahimy",
"givenName": "Muhammad I.",
"surname": "Ibrahimy",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccce",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "9-11",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-7635-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7635a005",
"articleId": "12OmNxGj9Ik",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7635a012",
"articleId": "12OmNyv7mty",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iih-msp/2008/3278/0/3278a183",
"title": "Iris Recognition Based on Matching Pursuits",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2008/3278a183/12OmNAolHaj",
"parentPublication": {
"id": "proceedings/iih-msp/2008/3278/0",
"title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ams/2010/4062/0/4062a209",
"title": "Error Correction on IRIS Biometric Template Using Reed Solomon Codes",
"doi": null,
"abstractUrl": "/proceedings-article/ams/2010/4062a209/12OmNAsBFLo",
"parentPublication": {
"id": "proceedings/ams/2010/4062/0",
"title": "Asia International Conference on Modelling & Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccct/2012/3149/0/06394726",
"title": "A Novel Approach to Minimize the Impact of Non Ideal Samples in Iris Recognition System",
"doi": null,
"abstractUrl": "/proceedings-article/iccct/2012/06394726/12OmNBSSVcv",
"parentPublication": {
"id": "proceedings/iccct/2012/3149/0",
"title": "2012 3rd International Conference on Computer and Communication Technology (ICCCT 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/psivt/2010/4285/0/4285a070",
"title": "Bit Reliability-driven Template Matching in Iris Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/psivt/2010/4285a070/12OmNrAv3ZR",
"parentPublication": {
"id": "proceedings/psivt/2010/4285/0",
"title": "Image and Video Technology, Pacific-Rim Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a527",
"title": "A Ground Truth for Iris Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a527/12OmNvDI44g",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/synasc/2009/3964/0/3964a384",
"title": "Exploring New Directions in Iris Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/synasc/2009/3964a384/12OmNx6xHoH",
"parentPublication": {
"id": "proceedings/synasc/2009/3964/0",
"title": "2009 11th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2008/3382/2/3382b340",
"title": "A Novel Template Protection Algorithm for Iris Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2008/3382b340/12OmNxETaok",
"parentPublication": {
"id": "proceedings/isda/2008/3382/2",
"title": "2008 Eighth International Conference on Intelligent Systems Design and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06239214",
"title": "Analysis of template aging in iris biometrics",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239214/12OmNxGSmbQ",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/socpar/2009/3879/0/3879a736",
"title": "Iris Biometric Cryptography for Identity Document",
"doi": null,
"abstractUrl": "/proceedings-article/socpar/2009/3879a736/12OmNyQ7FFA",
"parentPublication": {
"id": "proceedings/socpar/2009/3879/0",
"title": "Soft Computing and Pattern Recognition, International Conference of",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2009/3557/3/3557e316",
"title": "A New Localization Method for Iris Recognition Based on Angular Integral Projection Function",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2009/3557e316/12OmNz6iOpM",
"parentPublication": {
"id": "proceedings/etcs/2009/3557/3",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyKJiaV",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB0Fxi6",
"doi": "10.1109/ICPR.2010.58",
"title": "On the Fusion of Periocular and Iris Biometrics in Non-ideal Imagery",
"normalizedTitle": "On the Fusion of Periocular and Iris Biometrics in Non-ideal Imagery",
"abstract": "Human recognition based on the iris biometric is severely impacted when encountering non-ideal images of the eye characterized by occluded irises, motion and spatial blur, poor contrast, and illumination artifacts. This paper discusses the use of the periocular region surrounding the iris, along with the iris texture patterns, in order to improve the overall recognition performance in such images. Periocular texture is extracted from a small, fixed region of the skin surrounding the eye. Experiments on the images extracted from the Near Infra-Red (NIR) face videos of the Multi Biometric Grand Challenge (MBGC) dataset demonstrate that valuable information is contained in the periocular region and it can be fused with the iris texture to improve the overall identification accuracy in non-ideal situations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Human recognition based on the iris biometric is severely impacted when encountering non-ideal images of the eye characterized by occluded irises, motion and spatial blur, poor contrast, and illumination artifacts. This paper discusses the use of the periocular region surrounding the iris, along with the iris texture patterns, in order to improve the overall recognition performance in such images. Periocular texture is extracted from a small, fixed region of the skin surrounding the eye. Experiments on the images extracted from the Near Infra-Red (NIR) face videos of the Multi Biometric Grand Challenge (MBGC) dataset demonstrate that valuable information is contained in the periocular region and it can be fused with the iris texture to improve the overall identification accuracy in non-ideal situations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Human recognition based on the iris biometric is severely impacted when encountering non-ideal images of the eye characterized by occluded irises, motion and spatial blur, poor contrast, and illumination artifacts. This paper discusses the use of the periocular region surrounding the iris, along with the iris texture patterns, in order to improve the overall recognition performance in such images. Periocular texture is extracted from a small, fixed region of the skin surrounding the eye. Experiments on the images extracted from the Near Infra-Red (NIR) face videos of the Multi Biometric Grand Challenge (MBGC) dataset demonstrate that valuable information is contained in the periocular region and it can be fused with the iris texture to improve the overall identification accuracy in non-ideal situations.",
"fno": "4109a201",
"keywords": [
"Periocular Biometrics",
"Non Ideal Iris Recognition",
"Local Binary Patterns"
],
"authors": [
{
"affiliation": null,
"fullName": "Damon L. Woodard",
"givenName": "Damon L.",
"surname": "Woodard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shrinivas Pundlik",
"givenName": "Shrinivas",
"surname": "Pundlik",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Philip Miller",
"givenName": "Philip",
"surname": "Miller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Raghavender Jillela",
"givenName": "Raghavender",
"surname": "Jillela",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Arun Ross",
"givenName": "Arun",
"surname": "Ross",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "201-204",
"year": "2010",
"issn": "1051-4651",
"isbn": "978-0-7695-4109-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4109a197",
"articleId": "12OmNy6qfPR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4109a205",
"articleId": "12OmNwcCISP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2010/7029/0/05544621",
"title": "Periocular region appearance cues for biometric identification",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05544621/12OmNAhxjD5",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2014/7978/0/7978a584",
"title": "Periocular Recognition by Detection of Local Symmetry Patterns",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2014/7978a584/12OmNAoUTxG",
"parentPublication": {
"id": "proceedings/sitis/2014/7978/0",
"title": "2014 Tenth International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2013/4990/0/4990a117",
"title": "What Is a \"Good\" Periocular Region for Recognition?",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2013/4990a117/12OmNwDACoQ",
"parentPublication": {
"id": "proceedings/cvprw/2013/4990/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisp/2008/3119/1/3119a661",
"title": "Iris Biometrics Recognition Application in Security Management",
"doi": null,
"abstractUrl": "/proceedings-article/cisp/2008/3119a661/12OmNwOnn27",
"parentPublication": {
"id": "proceedings/cisp/2008/3119/1",
"title": "Image and Signal Processing, Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2013/5053/0/06475025",
"title": "Periocular biometric recognition using image sets",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2013/06475025/12OmNyTOspv",
"parentPublication": {
"id": "proceedings/wacv/2013/5053/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460194",
"title": "Human identification from at-a-distance images by simultaneously exploiting iris and periocular features",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460194/12OmNyyeWzr",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2009/09/ttp2009091670",
"title": "Toward Accurate and Fast Iris Segmentation for Iris Biometrics",
"doi": null,
"abstractUrl": "/journal/tp/2009/09/ttp2009091670/13rRUwInvzz",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a904",
"title": "Predicting Gender From Iris Texture May Be Harder Than It Seems",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a904/18j8R2m5PFe",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bracis/2019/4253/0/425300a401",
"title": "Impact of Facial Expressions on the Accuracy of a CNN Performing Periocular Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/bracis/2019/425300a401/1fHkMjhF3eE",
"parentPublication": {
"id": "proceedings/bracis/2019/4253/0",
"title": "2019 8th Brazilian Conference on Intelligent Systems (BRACIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2019/5227/0/522700a178",
"title": "Simultaneous Iris and Periocular Region Detection Using Coarse Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2019/522700a178/1fHlqIwKGu4",
"parentPublication": {
"id": "proceedings/sibgrapi/2019/5227/0",
"title": "2019 32nd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxjjEc7",
"title": "Parallel Architectures, Algorithms and Programming, International Symposium on",
"acronym": "paap",
"groupId": "1800289",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNynJMXu",
"doi": "10.1109/PAAP.2011.51",
"title": "Utilizing Dark Features for Iris Recognition in Less Constrained Environments",
"normalizedTitle": "Utilizing Dark Features for Iris Recognition in Less Constrained Environments",
"abstract": "We propose a novel approach for iris recognition in less constrained environments that takes into account imaging noise arising from image capture outside the Depth of Field (DOF) of cameras. The proposed approach utilizes stable dark regions in iris images for recognition and does not rely on special hardware or on computationally expensive image restoration algorithms. We have employed a Gabor-based model to establish that stable features, which are not sensitive to defocus, correspond to regions in iris images with low gray-level intensity. We will also present an approach to identify stable bits from the iris code representation, which correspond to dark regions in the enrolled image. Only these stable bits are used for recognition. Experimental results based on 15,000 images with varying degree of defocus show that the proposed method achieves an average recognition performance gain of up to 6% over a conventional method that relies on the entire code representation for iris recognition.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel approach for iris recognition in less constrained environments that takes into account imaging noise arising from image capture outside the Depth of Field (DOF) of cameras. The proposed approach utilizes stable dark regions in iris images for recognition and does not rely on special hardware or on computationally expensive image restoration algorithms. We have employed a Gabor-based model to establish that stable features, which are not sensitive to defocus, correspond to regions in iris images with low gray-level intensity. We will also present an approach to identify stable bits from the iris code representation, which correspond to dark regions in the enrolled image. Only these stable bits are used for recognition. Experimental results based on 15,000 images with varying degree of defocus show that the proposed method achieves an average recognition performance gain of up to 6% over a conventional method that relies on the entire code representation for iris recognition.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel approach for iris recognition in less constrained environments that takes into account imaging noise arising from image capture outside the Depth of Field (DOF) of cameras. The proposed approach utilizes stable dark regions in iris images for recognition and does not rely on special hardware or on computationally expensive image restoration algorithms. We have employed a Gabor-based model to establish that stable features, which are not sensitive to defocus, correspond to regions in iris images with low gray-level intensity. We will also present an approach to identify stable bits from the iris code representation, which correspond to dark regions in the enrolled image. Only these stable bits are used for recognition. Experimental results based on 15,000 images with varying degree of defocus show that the proposed method achieves an average recognition performance gain of up to 6% over a conventional method that relies on the entire code representation for iris recognition.",
"fno": "4575a110",
"keywords": [
"Iris Recognition",
"Dark Regions",
"Defocused Iris Images"
],
"authors": [
{
"affiliation": null,
"fullName": "Bo Liu",
"givenName": "Bo",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Siew-Kei Lam",
"givenName": "Siew-Kei",
"surname": "Lam",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Thambipillai Srikanthan",
"givenName": "Thambipillai",
"surname": "Srikanthan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Weiqi Yuan",
"givenName": "Weiqi",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "paap",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-12-01T00:00:00",
"pubType": "proceedings",
"pages": "110-114",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4575-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4575a105",
"articleId": "12OmNwErpFV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4575a115",
"articleId": "12OmNCctf7L",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdip/2009/3565/0/3565a421",
"title": "Improving Iris-Based Personal Identification Using Maximum Rectangular Region Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icdip/2009/3565a421/12OmNASILRo",
"parentPublication": {
"id": "proceedings/icdip/2009/3565/0",
"title": "Digital Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2008/3278/0/3278a183",
"title": "Iris Recognition Based on Matching Pursuits",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2008/3278a183/12OmNAolHaj",
"parentPublication": {
"id": "proceedings/iih-msp/2008/3278/0",
"title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imvip/2008/3332/0/3332a170",
"title": "Iris Recognition and Ocular Biometrics - The Salient Features",
"doi": null,
"abstractUrl": "/proceedings-article/imvip/2008/3332a170/12OmNApu5o6",
"parentPublication": {
"id": "proceedings/imvip/2008/3332/0",
"title": "International Machine Vision and Image Processing Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206700",
"title": "Image deblurring for less intrusive iris capture",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206700/12OmNwCJOMY",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2010/4166/0/4166a147",
"title": "Ordinal Region-Based Representations for Iris Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2010/4166a147/12OmNwG90h5",
"parentPublication": {
"id": "proceedings/cgiv/2010/4166/0",
"title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/synasc/2009/3964/0/3964a384",
"title": "Exploring New Directions in Iris Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/synasc/2009/3964a384/12OmNx6xHoH",
"parentPublication": {
"id": "proceedings/synasc/2009/3964/0",
"title": "2009 11th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2014/7978/0/7978a049",
"title": "Accurate Detection of Non-Iris Occlusions",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2014/7978a049/12OmNz2C1t7",
"parentPublication": {
"id": "proceedings/sitis/2014/7978/0",
"title": "2014 Tenth International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2018/01/07350227",
"title": "Negative Iris Recognition",
"doi": null,
"abstractUrl": "/journal/tq/2018/01/07350227/13rRUxly96T",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2010/08/ttp2010081529",
"title": "The UBIRIS.v2: A Database of Visible Wavelength Iris Images Captured On-the-Move and At-a-Distance",
"doi": null,
"abstractUrl": "/journal/tp/2010/08/ttp2010081529/13rRUyogGBk",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600c296",
"title": "Segmentation-Less and Non-Holistic Deep-Learning Frameworks for Iris Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600c296/1iTvorbo2Na",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKiqc",
"title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)",
"acronym": "bigmm",
"groupId": "1808144",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45XeKgyz",
"doi": "10.1109/BigMM.2018.8499061",
"title": "Iris Liveness Detection: A Survey",
"normalizedTitle": "Iris Liveness Detection: A Survey",
"abstract": "Iris is an accurate but vulnerable trait which is widely used in biometric system. To protect iris recognition module, an additional process called liveness detection is integrated into biometric system. The primary objective of this paper is to review the recent progress in iris liveness detection. For this purposes, we categorize iris liveness detection approaches into sensor-level method, which add extra hardware to detect vital signal of subjects, and feature-level method, which use algorithm implemented in software to analysis liveness of the presentation. In order to acquire a better understanding of this field, we describe some presentation attack methods and introduce some competition and dataset for iris liveness detection. To sum up, big achievement of iris liveness detection have been made in last five years, but there is still the need to devote further efforts to build more security biometric systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Iris is an accurate but vulnerable trait which is widely used in biometric system. To protect iris recognition module, an additional process called liveness detection is integrated into biometric system. The primary objective of this paper is to review the recent progress in iris liveness detection. For this purposes, we categorize iris liveness detection approaches into sensor-level method, which add extra hardware to detect vital signal of subjects, and feature-level method, which use algorithm implemented in software to analysis liveness of the presentation. In order to acquire a better understanding of this field, we describe some presentation attack methods and introduce some competition and dataset for iris liveness detection. To sum up, big achievement of iris liveness detection have been made in last five years, but there is still the need to devote further efforts to build more security biometric systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Iris is an accurate but vulnerable trait which is widely used in biometric system. To protect iris recognition module, an additional process called liveness detection is integrated into biometric system. The primary objective of this paper is to review the recent progress in iris liveness detection. For this purposes, we categorize iris liveness detection approaches into sensor-level method, which add extra hardware to detect vital signal of subjects, and feature-level method, which use algorithm implemented in software to analysis liveness of the presentation. In order to acquire a better understanding of this field, we describe some presentation attack methods and introduce some competition and dataset for iris liveness detection. To sum up, big achievement of iris liveness detection have been made in last five years, but there is still the need to devote further efforts to build more security biometric systems.",
"fno": "08499061",
"keywords": [
"Biometrics Access Control",
"Iris Recognition",
"Signal Detection",
"Sensor Level Method",
"Vital Signal Detection",
"Feature Level Method",
"Presentation Attack Methods",
"Security Biometric Systems",
"Iris Recognition Module",
"Iris Liveness Detection",
"Iris Recognition",
"Feature Extraction",
"Lenses",
"Iris",
"Three Dimensional Displays",
"Hardware",
"Lighting",
"Iris Liveness Detection",
"Presentation Attacks",
"Biometrics"
],
"authors": [
{
"affiliation": "University of Chinese Academy of Science, Beijing",
"fullName": "Yangyu Chen",
"givenName": "Yangyu",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Harbin Institute of Technology, Weihai",
"fullName": "Weigang Zhang",
"givenName": "Weigang",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bigmm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-5321-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08499461",
"articleId": "17D45VTRoA7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08499081",
"articleId": "17D45WrVgaf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2014/4871/0/06918666",
"title": "MoBio_LivDet: Mobile biometric liveness detection",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2014/06918666/12OmNCga1R2",
"parentPublication": {
"id": "proceedings/avss/2014/4871/0",
"title": "2014 International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2015/7962/0/7962a157",
"title": "An Approach to Iris Contact Lens Detection Based on Deep Image Representations",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2015/7962a157/12OmNqN6R6I",
"parentPublication": {
"id": "proceedings/sibgrapi/2015/7962/0",
"title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2013/5053/0/06475026",
"title": "An experimental study of pupil constriction for liveness detection",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2013/06475026/12OmNrY3Ltw",
"parentPublication": {
"id": "proceedings/wacv/2013/5053/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2017/0733/0/0733a664",
"title": "Iris Liveness Detection by Relative Distance Comparisons",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733a664/12OmNwuvrWV",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/3/07295057",
"title": "Iris liveness detection methods in mobile applications",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295057/12OmNxveNEs",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2018/5188/0/518801a044",
"title": "A Multi-task Convolutional Neural Network for Joint Iris Detection and Presentation Attack Detection",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2018/518801a044/12OmNyoSb8w",
"parentPublication": {
"id": "proceedings/wacvw/2018/5188/0",
"title": "2018 IEEE Winter Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761673",
"title": "Counterfeit iris detection based on texture analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761673/12OmNyuy9L9",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000a685",
"title": "Fusion of Handcrafted and Deep Learning Features for Large-Scale Multiple Iris Presentation Attack Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000a685/17D45WaTkfo",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000b701",
"title": "Convolutional Neural Networks for Iris Presentation Attack Detection: Toward Cross-Dataset and Cross-Sensor Generalization",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000b701/17D45X0yjRj",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a877",
"title": "Iris Presentation Attack Detection Based on Photometric Stereo Features",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a877/18j8Hd7wjK0",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKisJ",
"title": "2018 15th International Symposium on Pervasive Systems, Algorithms and Networks (I-SPAN)",
"acronym": "i-span",
"groupId": "1000536",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45Xtvpdu",
"doi": "10.1109/I-SPAN.2018.00020",
"title": "Biological Features De-identification in Iris Images",
"normalizedTitle": "Biological Features De-identification in Iris Images",
"abstract": "Biometric authentication has recently become hugely popular in mobile social networks. Iris recognition, a typical biometric authentication technology, has been widely used in many fields. However, it can lead to serious consequences if the iris feature information is utilized by an illegal party. Thus, a critical issue is how to preserve iris features when the human's images are used in mobile social networks. In this paper, we first introduce an approach to identify the iris area in an eye image. By means of differential privacy, we present a de-identification algorithm on the iris area to preserve iris biological features. A remarkable advantage of our approach is that it only conceals the iris biological features but does not change the other parts of the given image. A simulation example is provided to show the effectiveness of our de-identification method at last.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Biometric authentication has recently become hugely popular in mobile social networks. Iris recognition, a typical biometric authentication technology, has been widely used in many fields. However, it can lead to serious consequences if the iris feature information is utilized by an illegal party. Thus, a critical issue is how to preserve iris features when the human's images are used in mobile social networks. In this paper, we first introduce an approach to identify the iris area in an eye image. By means of differential privacy, we present a de-identification algorithm on the iris area to preserve iris biological features. A remarkable advantage of our approach is that it only conceals the iris biological features but does not change the other parts of the given image. A simulation example is provided to show the effectiveness of our de-identification method at last.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Biometric authentication has recently become hugely popular in mobile social networks. Iris recognition, a typical biometric authentication technology, has been widely used in many fields. However, it can lead to serious consequences if the iris feature information is utilized by an illegal party. Thus, a critical issue is how to preserve iris features when the human's images are used in mobile social networks. In this paper, we first introduce an approach to identify the iris area in an eye image. By means of differential privacy, we present a de-identification algorithm on the iris area to preserve iris biological features. A remarkable advantage of our approach is that it only conceals the iris biological features but does not change the other parts of the given image. A simulation example is provided to show the effectiveness of our de-identification method at last.",
"fno": "853400a067",
"keywords": [
"Data Privacy",
"Feature Extraction",
"Iris Recognition",
"Mobile Computing",
"Social Networking Online",
"Biometric Authentication Technology",
"Biological Feature De Identification",
"Differential Privacy",
"Iris Biological Features",
"Eye Image",
"Iris Area",
"Iris Feature Information",
"Iris Recognition",
"Mobile Social Networks",
"Iris Recognition",
"Iris",
"Differential Privacy",
"Privacy",
"Authentication",
"Transforms",
"Iris Image",
"Privacy Preserve",
"Biological Feature"
],
"authors": [
{
"affiliation": null,
"fullName": "Heng Zhang",
"givenName": "Heng",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Huan Zhou",
"givenName": "Huan",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wenming Jiao",
"givenName": "Wenming",
"surname": "Jiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jie Shi",
"givenName": "Jie",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qiyan Zang",
"givenName": "Qiyan",
"surname": "Zang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jing Sun",
"givenName": "Jing",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jian Zhang",
"givenName": "Jian",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "i-span",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "67-71",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-8534-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "853400a060",
"articleId": "17D45XdBRQl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "853400a072",
"articleId": "17D45XfSEV1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457g747",
"title": "IRINA: Iris Recognition (Even) in Inaccurately Segmented Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g747/12OmNAHmOs3",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sccc/2011/4689/0/06363408",
"title": "Zernike's Feature Descriptors for Iris Recognition with SVM",
"doi": null,
"abstractUrl": "/proceedings-article/sccc/2011/06363408/12OmNBNM8T2",
"parentPublication": {
"id": "proceedings/sccc/2011/4689/0",
"title": "2011 30th International Conference of the Chilean Computer Science Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761886",
"title": "Cancelable iris biometric",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761886/12OmNBNM90O",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/music/2012/1956/0/4727a120",
"title": "A Privacy-Preserving Biometric Matching Protocol for Iris Codes Verification",
"doi": null,
"abstractUrl": "/proceedings-article/music/2012/4727a120/12OmNCesr3U",
"parentPublication": {
"id": "proceedings/music/2012/1956/0",
"title": "2012 Third FTRA International Conference on Mobile, Ubiquitous, and Intelligent Computing (MUSIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206646",
"title": "Cancelable iris biometrics and using Error Correcting Codes to reduce variability in biometric data",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206646/12OmNwIHouC",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/artcom/2009/3845/0/3845a796",
"title": "An Approach of Iris Feature Extraction for Personal Identification",
"doi": null,
"abstractUrl": "/proceedings-article/artcom/2009/3845a796/12OmNywxlFT",
"parentPublication": {
"id": "proceedings/artcom/2009/3845/0",
"title": "Advances in Recent Technologies in Communication and Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/act/2009/3915/0/05375821",
"title": "Iris Feature Extraction for Personal Identification Using Lifting Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/act/2009/05375821/13bd1tMztY2",
"parentPublication": {
"id": "proceedings/act/2009/3915/0",
"title": "Advances in Computing, Control, and Telecommunication Technologies, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2018/01/07350227",
"title": "Negative Iris Recognition",
"doi": null,
"abstractUrl": "/journal/tq/2018/01/07350227/13rRUxly96T",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issre/2018/8321/0/832100a248",
"title": "Iris Template Protection Based on Randomized Response Technique and Aggregated Block Information",
"doi": null,
"abstractUrl": "/proceedings-article/issre/2018/832100a248/17D45WODaqr",
"parentPublication": {
"id": "proceedings/issre/2018/8321/0",
"title": "2018 IEEE 29th International Symposium on Software Reliability Engineering (ISSRE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2018/5321/0/08499061",
"title": "Iris Liveness Detection: A Survey",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2018/08499061/17D45XeKgyz",
"parentPublication": {
"id": "proceedings/bigmm/2018/5321/0",
"title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqH9hnp",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNASraXC",
"doi": "10.1109/CVPR.2016.391",
"title": "Robust 3D Hand Pose Estimation in Single Depth Images: From Single-View CNN to Multi-View CNNs",
"normalizedTitle": "Robust 3D Hand Pose Estimation in Single Depth Images: From Single-View CNN to Multi-View CNNs",
"abstract": "Articulated hand pose estimation plays an important role in human-computer interaction. Despite the recent progress, the accuracy of existing methods is still not satisfactory, partially due to the difficulty of embedded highdimensional and non-linear regression problem. Different from the existing discriminative methods that regress for the hand pose with a single depth image, we propose to first project the query depth image onto three orthogonal planes and utilize these multi-view projections to regress for 2D heat-maps which estimate the joint positions on each plane. These multi-view heat-maps are then fused to produce final 3D hand pose estimation with learned pose priors. Experiments show that the proposed method largely outperforms state-of-the-art on a challenging dataset. Moreover, a cross-dataset experiment also demonstrates the good generalization ability of the proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Articulated hand pose estimation plays an important role in human-computer interaction. Despite the recent progress, the accuracy of existing methods is still not satisfactory, partially due to the difficulty of embedded highdimensional and non-linear regression problem. Different from the existing discriminative methods that regress for the hand pose with a single depth image, we propose to first project the query depth image onto three orthogonal planes and utilize these multi-view projections to regress for 2D heat-maps which estimate the joint positions on each plane. These multi-view heat-maps are then fused to produce final 3D hand pose estimation with learned pose priors. Experiments show that the proposed method largely outperforms state-of-the-art on a challenging dataset. Moreover, a cross-dataset experiment also demonstrates the good generalization ability of the proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Articulated hand pose estimation plays an important role in human-computer interaction. Despite the recent progress, the accuracy of existing methods is still not satisfactory, partially due to the difficulty of embedded highdimensional and non-linear regression problem. Different from the existing discriminative methods that regress for the hand pose with a single depth image, we propose to first project the query depth image onto three orthogonal planes and utilize these multi-view projections to regress for 2D heat-maps which estimate the joint positions on each plane. These multi-view heat-maps are then fused to produce final 3D hand pose estimation with learned pose priors. Experiments show that the proposed method largely outperforms state-of-the-art on a challenging dataset. Moreover, a cross-dataset experiment also demonstrates the good generalization ability of the proposed method.",
"fno": "8851d593",
"keywords": [
"Three Dimensional Displays",
"Heating",
"Pose Estimation",
"Two Dimensional Displays",
"Solid Modeling",
"Robustness"
],
"authors": [
{
"affiliation": null,
"fullName": "Liuhao Ge",
"givenName": "Liuhao",
"surname": "Ge",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hui Liang",
"givenName": "Hui",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Junsong Yuan",
"givenName": "Junsong",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Daniel Thalmann",
"givenName": "Daniel",
"surname": "Thalmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "3593-3601",
"year": "2016",
"issn": "1063-6919",
"isbn": "978-1-4673-8851-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8851d583",
"articleId": "12OmNvlg8qu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8851d602",
"articleId": "12OmNrJRPg4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457f679",
"title": "3D Convolutional Neural Networks for Efficient and Robust Hand Pose Estimation from Single Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f679/12OmNBQC895",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d123",
"title": "Learning Hand Articulations by Hallucinating Heat Distribution",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d123/12OmNCcKQwN",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032e913",
"title": "Learning to Estimate 3D Hand Pose from Single RGB Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032e913/12OmNwcl7Bw",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/04/08338122",
"title": "Real-Time 3D Hand Pose Estimation with 3D Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/journal/tp/2019/04/08338122/13rRUx0xPJX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f147",
"title": "Dense 3D Regression for Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f147/17D45WaTkeL",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000i417",
"title": "Hand PointNet: 3D Hand Pose Estimation Using Point Sets",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000i417/17D45XERmmi",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1261",
"title": "Hand Image Understanding via Deep Multi-Task Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1261/1BmIK8fcVt6",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c335",
"title": "Aligning Latent Spaces for 3D Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c335/1hVlrNTxTeU",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h111",
"title": "HandVoxNet: Deep Voxel-Based Network for 3D Hand Shape and Pose Estimation From a Single Depth Map",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h111/1m3nfro8U8g",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icess/2020/6466/0/09301562",
"title": "3D Hand Pose Estimation from Single Depth Images with Label Distribution Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icess/2020/09301562/1pVHqDHbObm",
"parentPublication": {
"id": "proceedings/icess/2020/6466/0",
"title": "2020 IEEE International Conference on Embedded Software and Systems (ICESS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyoiYVr",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBQC895",
"doi": "10.1109/CVPR.2017.602",
"title": "3D Convolutional Neural Networks for Efficient and Robust Hand Pose Estimation from Single Depth Images",
"normalizedTitle": "3D Convolutional Neural Networks for Efficient and Robust Hand Pose Estimation from Single Depth Images",
"abstract": "We propose a simple, yet effective approach for real-time hand pose estimation from single depth images using three-dimensional Convolutional Neural Networks (3D CNNs). Image based features extracted by 2D CNNs are not directly suitable for 3D hand pose estimation due to the lack of 3D spatial information. Our proposed 3D CNN taking a 3D volumetric representation of the hand depth image as input can capture the 3D spatial structure of the input and accurately regress full 3D hand pose in a single pass. In order to make the 3D CNN robust to variations in hand sizes and global orientations, we perform 3D data augmentation on the training data. Experiments show that our proposed 3D CNN based approach outperforms state-of-the-art methods on two challenging hand pose datasets, and is very efficient as our implementation runs at over 215 fps on a standard computer with a single GPU.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a simple, yet effective approach for real-time hand pose estimation from single depth images using three-dimensional Convolutional Neural Networks (3D CNNs). Image based features extracted by 2D CNNs are not directly suitable for 3D hand pose estimation due to the lack of 3D spatial information. Our proposed 3D CNN taking a 3D volumetric representation of the hand depth image as input can capture the 3D spatial structure of the input and accurately regress full 3D hand pose in a single pass. In order to make the 3D CNN robust to variations in hand sizes and global orientations, we perform 3D data augmentation on the training data. Experiments show that our proposed 3D CNN based approach outperforms state-of-the-art methods on two challenging hand pose datasets, and is very efficient as our implementation runs at over 215 fps on a standard computer with a single GPU.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a simple, yet effective approach for real-time hand pose estimation from single depth images using three-dimensional Convolutional Neural Networks (3D CNNs). Image based features extracted by 2D CNNs are not directly suitable for 3D hand pose estimation due to the lack of 3D spatial information. Our proposed 3D CNN taking a 3D volumetric representation of the hand depth image as input can capture the 3D spatial structure of the input and accurately regress full 3D hand pose in a single pass. In order to make the 3D CNN robust to variations in hand sizes and global orientations, we perform 3D data augmentation on the training data. Experiments show that our proposed 3D CNN based approach outperforms state-of-the-art methods on two challenging hand pose datasets, and is very efficient as our implementation runs at over 215 fps on a standard computer with a single GPU.",
"fno": "0457f679",
"keywords": [
"Feature Extraction",
"Image Classification",
"Image Representation",
"Neural Nets",
"Pose Estimation",
"3 D Data Augmentation",
"3 D CNN Based Approach",
"Single GPU",
"Robust Hand Pose Estimation",
"Single Depth Images",
"3 D Spatial Information",
"3 D Volumetric Representation",
"Hand Depth Image",
"3 D Spatial Structure",
"3 D Convolutional Neural Networks",
"2 D CNN",
"Real Time Hand Pose Estimation",
"Image Based Feature Extraction",
"3 D CNN",
"Hand Pose Datasets",
"Three Dimensional Displays",
"Pose Estimation",
"Feature Extraction",
"Two Dimensional Displays",
"Solid Modeling",
"Robustness",
"Real Time Systems"
],
"authors": [
{
"affiliation": null,
"fullName": "Liuhao Ge",
"givenName": "Liuhao",
"surname": "Ge",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hui Liang",
"givenName": "Hui",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Junsong Yuan",
"givenName": "Junsong",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Daniel Thalmann",
"givenName": "Daniel",
"surname": "Thalmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "5679-5688",
"year": "2017",
"issn": "1063-6919",
"isbn": "978-1-5386-0457-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0457f669",
"articleId": "12OmNCcbE3F",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0457f689",
"articleId": "12OmNBRbkrc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851d593",
"title": "Robust 3D Hand Pose Estimation in Single Depth Images: From Single-View CNN to Multi-View CNNs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d593/12OmNASraXC",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457f689",
"title": "Lifting from the Deep: Convolutional 3D Pose Estimation from a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f689/12OmNBRbkrc",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032e913",
"title": "Learning to Estimate 3D Hand Pose from Single RGB Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032e913/12OmNwcl7Bw",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/04/08338122",
"title": "Real-Time 3D Hand Pose Estimation with 3D Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/journal/tp/2019/04/08338122/13rRUx0xPJX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c636",
"title": "Depth-Based 3D Hand Pose Estimation: From Current Achievements to Future Goals",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c636/17D45W2Wyyl",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f079",
"title": "V2V-PoseNet: Voxel-to-Voxel Prediction Network for Accurate 3D Hand and Human Pose Estimation from a Single Depth Map",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f079/17D45WHONoj",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000i417",
"title": "Hand PointNet: 3D Hand Pose Estimation Using Point Sets",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000i417/17D45XERmmi",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300g960",
"title": "SO-HandNet: Self-Organizing Network for 3D Hand Pose Estimation With Semi-Supervised Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300g960/1hVljvoseME",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c866",
"title": "3D Hand Pose Estimation from RGB Using Privileged Learning with Depth Data",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c866/1i5mvFudr68",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h111",
"title": "HandVoxNet: Deep Voxel-Based Network for 3D Hand Shape and Pose Estimation From a Single Depth Map",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h111/1m3nfro8U8g",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvHoQq3",
"doi": "10.1109/ICCV.2017.339",
"title": "Robust Hand Pose Estimation during the Interaction with an Unknown Object",
"normalizedTitle": "Robust Hand Pose Estimation during the Interaction with an Unknown Object",
"abstract": "This paper proposes a robust solution for accurate 3D hand pose estimation in the presence of an external object interacting with hands. Our main insight is that the shape of an object causes a configuration of the hand in the form of a hand grasp. Along this line, we simultaneously train deep neural networks using paired depth images. The object-oriented network learns functional grasps from an object perspective, whereas the hand-oriented network explores the details of hand configurations from a hand perspective. The two networks share intermediate observations produced from different perspectives to create a more informed representation. Our system then collaboratively classifies the grasp types and orientation of the hand and further constrains a pose space using these estimates. Finally, we collectively refine the unknown pose parameters to reconstruct the final hand pose. To this end, we conduct extensive evaluations to validate the efficacy of the proposed collaborative learning approach by comparing it with self-generated baselines and the state-of-the-art method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a robust solution for accurate 3D hand pose estimation in the presence of an external object interacting with hands. Our main insight is that the shape of an object causes a configuration of the hand in the form of a hand grasp. Along this line, we simultaneously train deep neural networks using paired depth images. The object-oriented network learns functional grasps from an object perspective, whereas the hand-oriented network explores the details of hand configurations from a hand perspective. The two networks share intermediate observations produced from different perspectives to create a more informed representation. Our system then collaboratively classifies the grasp types and orientation of the hand and further constrains a pose space using these estimates. Finally, we collectively refine the unknown pose parameters to reconstruct the final hand pose. To this end, we conduct extensive evaluations to validate the efficacy of the proposed collaborative learning approach by comparing it with self-generated baselines and the state-of-the-art method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a robust solution for accurate 3D hand pose estimation in the presence of an external object interacting with hands. Our main insight is that the shape of an object causes a configuration of the hand in the form of a hand grasp. Along this line, we simultaneously train deep neural networks using paired depth images. The object-oriented network learns functional grasps from an object perspective, whereas the hand-oriented network explores the details of hand configurations from a hand perspective. The two networks share intermediate observations produced from different perspectives to create a more informed representation. Our system then collaboratively classifies the grasp types and orientation of the hand and further constrains a pose space using these estimates. Finally, we collectively refine the unknown pose parameters to reconstruct the final hand pose. To this end, we conduct extensive evaluations to validate the efficacy of the proposed collaborative learning approach by comparing it with self-generated baselines and the state-of-the-art method.",
"fno": "1032d142",
"keywords": [
"Feature Extraction",
"Groupware",
"Image Classification",
"Learning Artificial Intelligence",
"Neural Nets",
"Pose Estimation",
"Hand Grasp",
"Deep Neural Networks",
"Paired Depth Images",
"Object Oriented Network",
"Functional Grasps",
"Hand Oriented Network",
"Hand Configurations",
"Grasp Types",
"Robust Hand Pose Estimation",
"Robust Solution",
"Hands",
"Collaborative Learning",
"Pose Estimation",
"Three Dimensional Displays",
"Shape",
"Solid Modeling",
"Heating Systems",
"Robustness",
"Neural Networks"
],
"authors": [
{
"affiliation": null,
"fullName": "Chiho Choi",
"givenName": "Chiho",
"surname": "Choi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sang Ho Yoon",
"givenName": "Sang Ho",
"surname": "Yoon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chin-Ning Chen",
"givenName": "Chin-Ning",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Karthik Ramani",
"givenName": "Karthik",
"surname": "Ramani",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "3142-3151",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032d133",
"articleId": "12OmNC4eSyL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032d152",
"articleId": "12OmNx0A7Ez",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851d593",
"title": "Robust 3D Hand Pose Estimation in Single Depth Images: From Single-View CNN to Multi-View CNNs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d593/12OmNASraXC",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d123",
"title": "Learning Hand Articulations by Hallucinating Heat Distribution",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d123/12OmNCcKQwN",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/04/08338122",
"title": "Real-Time 3D Hand Pose Estimation with 3D Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/journal/tp/2019/04/08338122/13rRUx0xPJX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/08/08675397",
"title": "Generalized Feedback Loop for Joint Hand-Object Pose Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2020/08/08675397/18K0dKHQd0Y",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1077",
"title": "CPF: Learning a Contact Potential Field to Model the Hand-Object Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1077/1BmF0nZi9ck",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600n3253",
"title": "GOAL: Generating 4D Whole-Body Motion for Hand-Object Grasping",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3253/1H1jAb3RmsU",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c335",
"title": "Aligning Latent Spaces for 3D Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c335/1hVlrNTxTeU",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412126",
"title": "PEAN: 3D Hand Pose Estimation Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412126/1tmizxbU8Cs",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100c266",
"title": "SCAT: Stride Consistency with Auto-regressive regressor and Transformer for hand pose estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100c266/1yNhriirGcE",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09599544",
"title": "HandVoxNet++: 3D Hand Shape and Pose Estimation Using Voxel-Based Neural Networks",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09599544/1yeC9mCPAty",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC2fGts",
"title": "2017 Intelligent Systems and Computer Vision (ISCV)",
"acronym": "iscv",
"groupId": "1807104",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwFid2e",
"doi": "10.1109/ISACV.2017.8054904",
"title": "Hand pose estimation based on deep learning depth map for hand gesture recognition",
"normalizedTitle": "Hand pose estimation based on deep learning depth map for hand gesture recognition",
"abstract": "Hand pose estimation plays an important role in many applications, especially in human-computer interaction. Therefore, this topic has matured quickly in recent years. In this work we focus on the hand pose estimation from a depth map using convolutional neural networks. We propose a method for hand pose estimation by formulating a regression problem whose solution is the 16 hand joint locations. This method consists of two stages, the first one dealing a hand detection based on contours, the second one consists hand pose estimation using convolutional neural networks. In this paper, we provide an extensive quantitative and qualitative experiments using real world depth maps from ICVL dataset. We perform a comparative evaluation with the state-of-the-art approaches to show the effectiveness and the accuracy of our method. Moreover, we propose a new application for hand gesture recognition based on our hand pose estimation method. The experimental results reported on test sequences of ICVL dataset show that the proposed application yields interesting performances and gives a marked improvement in recognition rate.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Hand pose estimation plays an important role in many applications, especially in human-computer interaction. Therefore, this topic has matured quickly in recent years. In this work we focus on the hand pose estimation from a depth map using convolutional neural networks. We propose a method for hand pose estimation by formulating a regression problem whose solution is the 16 hand joint locations. This method consists of two stages, the first one dealing a hand detection based on contours, the second one consists hand pose estimation using convolutional neural networks. In this paper, we provide an extensive quantitative and qualitative experiments using real world depth maps from ICVL dataset. We perform a comparative evaluation with the state-of-the-art approaches to show the effectiveness and the accuracy of our method. Moreover, we propose a new application for hand gesture recognition based on our hand pose estimation method. The experimental results reported on test sequences of ICVL dataset show that the proposed application yields interesting performances and gives a marked improvement in recognition rate.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Hand pose estimation plays an important role in many applications, especially in human-computer interaction. Therefore, this topic has matured quickly in recent years. In this work we focus on the hand pose estimation from a depth map using convolutional neural networks. We propose a method for hand pose estimation by formulating a regression problem whose solution is the 16 hand joint locations. This method consists of two stages, the first one dealing a hand detection based on contours, the second one consists hand pose estimation using convolutional neural networks. In this paper, we provide an extensive quantitative and qualitative experiments using real world depth maps from ICVL dataset. We perform a comparative evaluation with the state-of-the-art approaches to show the effectiveness and the accuracy of our method. Moreover, we propose a new application for hand gesture recognition based on our hand pose estimation method. The experimental results reported on test sequences of ICVL dataset show that the proposed application yields interesting performances and gives a marked improvement in recognition rate.",
"fno": "08054904",
"keywords": [
"Pose Estimation",
"Three Dimensional Displays",
"Image Segmentation",
"Solid Modeling",
"Neurons",
"Gesture Recognition",
"Neural Networks"
],
"authors": [
{
"affiliation": "LRIT-CNRST URAC 29, Mohammed V University In Rabat, Faculty of Sciences Rabat, Morocco",
"fullName": "Naima Otberdout",
"givenName": "Naima",
"surname": "Otberdout",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "LRIT-CNRST URAC 29, Mohammed V University In Rabat, Faculty of Sciences Rabat, Morocco",
"fullName": "Lahoucine Ballihi",
"givenName": "Lahoucine",
"surname": "Ballihi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "LRIT-CNRST URAC 29, Mohammed V University In Rabat, Faculty of Sciences Rabat, Morocco",
"fullName": "Driss Aboutajdine",
"givenName": "Driss",
"surname": "Aboutajdine",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iscv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-04-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2017",
"issn": null,
"isbn": "978-1-5090-4062-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08054903",
"articleId": "12OmNwFidbM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08054905",
"articleId": "12OmNyL0Tnt",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2017/2610/0/261001a557",
"title": "Simultaneous Hand Pose and Skeleton Bone-Lengths Estimation from a Single Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a557/12OmNASraN9",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a595",
"title": "Hand Pose Estimation Using Deep Stereovision and Markov-Chain Monte Carlo",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a595/12OmNCeK2dj",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d105",
"title": "Dynamic Hand Pose Recognition Using Depth Data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d105/12OmNvs4vtr",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2018/9497/0/949700a050",
"title": "RGB-D Hand Pose Estimation Using Fourier Descriptor",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2018/949700a050/17D45VsBU0e",
"parentPublication": {
"id": "proceedings/icdh/2018/9497/0",
"title": "2018 7th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f079",
"title": "V2V-PoseNet: Voxel-to-Voxel Prediction Network for Accurate 3D Hand and Human Pose Estimation from a Single Depth Map",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f079/17D45WHONoj",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546330",
"title": "Dynamic Projected Segmentation Networks For Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546330/17D45X7VTgX",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000i417",
"title": "Hand PointNet: 3D Hand Pose Estimation Using Point Sets",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000i417/17D45XERmmi",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/08/08675397",
"title": "Generalized Feedback Loop for Joint Hand-Object Pose Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2020/08/08675397/18K0dKHQd0Y",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09736619",
"title": "Recurrent 3D Hand Pose Estimation Using Cascaded Pose-Guided 3D Alignments",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09736619/1BN1OeDgXbW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093380",
"title": "DGGAN: Depth-image Guided Generative Adversarial Networks for Disentangling RGB and Depth Images in 3D Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093380/1jPbiSrXs64",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNB8Cj92",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwGIcxU",
"doi": "10.1109/ICMEW.2014.6890556",
"title": "A robust tracking algorithm for 3D hand gesture with rapid hand motion through deep learning",
"normalizedTitle": "A robust tracking algorithm for 3D hand gesture with rapid hand motion through deep learning",
"abstract": "There are two main problems that make hand gesture tracking especially difficult. One is the great number of degrees of freedom of the hand and the other one is the rapid movements that we make in natural gestures. Algorithms based on minimizing an objective function, with a good initialization, typically obtain good accuracy at low frame rates. However, these methods are very dependent on the initialization point, and fast movements on the hand position or gesture, provokes a lost of track which are unable to recover. We present a method that uses deep learning to train a set of gestures (81 gestures), that will be used as a rough estimate of the hand pose and orientation. This will serve to a registration of non rigid model algorithm that will find the parameters of hand, even when temporal assumption of smooth movements of hands is violated. To evaluate our proposed algorithm, different experiments are performed with some real sequences recorded with Intel depth sensor to demonstrate the performance in a real scenario.",
"abstracts": [
{
"abstractType": "Regular",
"content": "There are two main problems that make hand gesture tracking especially difficult. One is the great number of degrees of freedom of the hand and the other one is the rapid movements that we make in natural gestures. Algorithms based on minimizing an objective function, with a good initialization, typically obtain good accuracy at low frame rates. However, these methods are very dependent on the initialization point, and fast movements on the hand position or gesture, provokes a lost of track which are unable to recover. We present a method that uses deep learning to train a set of gestures (81 gestures), that will be used as a rough estimate of the hand pose and orientation. This will serve to a registration of non rigid model algorithm that will find the parameters of hand, even when temporal assumption of smooth movements of hands is violated. To evaluate our proposed algorithm, different experiments are performed with some real sequences recorded with Intel depth sensor to demonstrate the performance in a real scenario.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "There are two main problems that make hand gesture tracking especially difficult. One is the great number of degrees of freedom of the hand and the other one is the rapid movements that we make in natural gestures. Algorithms based on minimizing an objective function, with a good initialization, typically obtain good accuracy at low frame rates. However, these methods are very dependent on the initialization point, and fast movements on the hand position or gesture, provokes a lost of track which are unable to recover. We present a method that uses deep learning to train a set of gestures (81 gestures), that will be used as a rough estimate of the hand pose and orientation. This will serve to a registration of non rigid model algorithm that will find the parameters of hand, even when temporal assumption of smooth movements of hands is violated. To evaluate our proposed algorithm, different experiments are performed with some real sequences recorded with Intel depth sensor to demonstrate the performance in a real scenario.",
"fno": "06890556",
"keywords": [
"Three Dimensional Displays",
"Tracking",
"Data Models",
"Cameras",
"Solid Modeling",
"Joints",
"Gesture Recognition",
"Hand Model",
"Deep Learning",
"Optimization",
"Tracking"
],
"authors": [
{
"affiliation": "MCLab, CITI, Academia Sinica, Taiwan",
"fullName": "Jordi Sanchez-Riera",
"givenName": "Jordi",
"surname": "Sanchez-Riera",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MCLab, CITI, Academia Sinica, Taiwan",
"fullName": "Yuan-Sheng Hsiao",
"givenName": null,
"surname": "Yuan-Sheng Hsiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MCLab, CITI, Academia Sinica, Taiwan",
"fullName": "Tekoing Lim",
"givenName": null,
"surname": "Tekoing Lim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of CSIE, National Taiwan University of Science and Technology, Taiwan",
"fullName": "Kai-Lung Hua",
"givenName": null,
"surname": "Kai-Lung Hua",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MCLab, CITI, Academia Sinica, Taiwan",
"fullName": "Wen-Huang Cheng",
"givenName": null,
"surname": "Wen-Huang Cheng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2014",
"issn": "1945-7871",
"isbn": "978-1-4799-4717-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06890555",
"articleId": "12OmNwHQB9E",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06890557",
"articleId": "12OmNqAU6Ha",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a001",
"title": "[POSTER] A Probabilistic Combination of CNN and RNN Estimates for Hand Gesture Based Interaction in Car",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a001/12OmNBQ2VWK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437b206",
"title": "Skeleton-Based Dynamic Hand Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437b206/12OmNCdBDX2",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2012/4778/0/4778a049",
"title": "Real-time Hand Gesture Recognition from Depth Image Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2012/4778a049/12OmNvA1hkp",
"parentPublication": {
"id": "proceedings/cgiv/2012/4778/0",
"title": "2012 Ninth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410485",
"title": "A robust hand tracking and gesture recognition method for wearable visual interfaces and its applications",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410485/12OmNvlxJpQ",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2016/8838/0/07945999",
"title": "A Simple and Effective Method for Hand Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2016/07945999/12OmNxETa8g",
"parentPublication": {
"id": "proceedings/icnisc/2016/8838/0",
"title": "2016 International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a684",
"title": "Three Dimensional Motion Trail Model for Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a684/12OmNxRF73v",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a411",
"title": "Using Appearance-Based Hand Features for Dynamic RGB-D Gesture Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a411/12OmNxWuitX",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771447",
"title": "Motion divergence fields for dynamic hand gesture recognition",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771447/12OmNxZ2Gj4",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvri/2011/0054/0/05759659",
"title": "SOM-based hand gesture recognition for virtual interactions",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759659/12OmNxd4tor",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2006/2754/0/04089336",
"title": "Hand Gesture Interaction for Virtual Training of SPG",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2006/04089336/17D45Wt3Ew6",
"parentPublication": {
"id": "proceedings/icat/2006/2754/0",
"title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45W2Wyyl",
"doi": "10.1109/CVPR.2018.00279",
"title": "Depth-Based 3D Hand Pose Estimation: From Current Achievements to Future Goals",
"normalizedTitle": "Depth-Based 3D Hand Pose Estimation: From Current Achievements to Future Goals",
"abstract": "In this paper, we strive to answer two questions: What is the current state of 3D hand pose estimation from depth images? And, what are the next challenges that need to be tackled? Following the successful Hands In the Million Challenge (HIM2017), we investigate the top 10 state-of-the-art methods on three tasks: single frame 3D pose estimation, 3D hand tracking, and hand pose estimation during object interaction. We analyze the performance of different CNN structures with regard to hand shape, joint visibility, view point and articulation distributions. Our findings include: (1) isolated 3D hand pose estimation achieves low mean errors (10 mm) in the view point range of [70, 120] degrees, but it is far from being solved for extreme view points; (2) 3D volumetric representations outperform 2D CNNs, better capturing the spatial structure of the depth data; (3) Discriminative methods still generalize poorly to unseen hand shapes; (4) While joint occlusions pose a challenge for most methods, explicit modeling of structure constraints can significantly narrow the gap between errors on visible and occluded joints.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we strive to answer two questions: What is the current state of 3D hand pose estimation from depth images? And, what are the next challenges that need to be tackled? Following the successful Hands In the Million Challenge (HIM2017), we investigate the top 10 state-of-the-art methods on three tasks: single frame 3D pose estimation, 3D hand tracking, and hand pose estimation during object interaction. We analyze the performance of different CNN structures with regard to hand shape, joint visibility, view point and articulation distributions. Our findings include: (1) isolated 3D hand pose estimation achieves low mean errors (10 mm) in the view point range of [70, 120] degrees, but it is far from being solved for extreme view points; (2) 3D volumetric representations outperform 2D CNNs, better capturing the spatial structure of the depth data; (3) Discriminative methods still generalize poorly to unseen hand shapes; (4) While joint occlusions pose a challenge for most methods, explicit modeling of structure constraints can significantly narrow the gap between errors on visible and occluded joints.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we strive to answer two questions: What is the current state of 3D hand pose estimation from depth images? And, what are the next challenges that need to be tackled? Following the successful Hands In the Million Challenge (HIM2017), we investigate the top 10 state-of-the-art methods on three tasks: single frame 3D pose estimation, 3D hand tracking, and hand pose estimation during object interaction. We analyze the performance of different CNN structures with regard to hand shape, joint visibility, view point and articulation distributions. Our findings include: (1) isolated 3D hand pose estimation achieves low mean errors (10 mm) in the view point range of [70, 120] degrees, but it is far from being solved for extreme view points; (2) 3D volumetric representations outperform 2D CNNs, better capturing the spatial structure of the depth data; (3) Discriminative methods still generalize poorly to unseen hand shapes; (4) While joint occlusions pose a challenge for most methods, explicit modeling of structure constraints can significantly narrow the gap between errors on visible and occluded joints.",
"fno": "642000c636",
"keywords": [
"Gesture Recognition",
"Image Capture",
"Image Representation",
"Object Tracking",
"Pose Estimation",
"Stereo Image Processing",
"Object Interaction",
"3 D Hand Tracking",
"Hands In The Million Challenge",
"Depth Based 3 D Hand Pose Estimation",
"CNN Structures",
"Articulation Distributions",
"Hand Shape",
"Single Frame 3 D Pose Estimation",
"Depth Images",
"3 D Volumetric Representations",
"Three Dimensional Displays",
"Task Analysis",
"Pose Estimation",
"Two Dimensional Displays",
"Joints",
"Training",
"Solid Modeling"
],
"authors": [
{
"affiliation": null,
"fullName": "Shanxin Yuan",
"givenName": "Shanxin",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Guillermo Garcia-Hernando",
"givenName": "Guillermo",
"surname": "Garcia-Hernando",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Björn Stenger",
"givenName": "Björn",
"surname": "Stenger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gyeongsik Moon",
"givenName": "Gyeongsik",
"surname": "Moon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ju Yong Chang",
"givenName": "Ju Yong",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kyoung Mu Lee",
"givenName": "Kyoung Mu",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Pavlo Molchanov",
"givenName": "Pavlo",
"surname": "Molchanov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jan Kautz",
"givenName": "Jan",
"surname": "Kautz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sina Honari",
"givenName": "Sina",
"surname": "Honari",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Liuhao Ge",
"givenName": "Liuhao",
"surname": "Ge",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Junsong Yuan",
"givenName": "Junsong",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xinghao Chen",
"givenName": "Xinghao",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Guijin Wang",
"givenName": "Guijin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Fan Yang",
"givenName": "Fan",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kai Akiyama",
"givenName": "Kai",
"surname": "Akiyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yang Wu",
"givenName": "Yang",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qingfu Wan",
"givenName": "Qingfu",
"surname": "Wan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Meysam Madadi",
"givenName": "Meysam",
"surname": "Madadi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sergio Escalera",
"givenName": "Sergio",
"surname": "Escalera",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shile Li",
"givenName": "Shile",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dongheui Lee",
"givenName": "Dongheui",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Iason Oikonomidis",
"givenName": "Iason",
"surname": "Oikonomidis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Antonis Argyros",
"givenName": "Antonis",
"surname": "Argyros",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tae-Kyun Kim",
"givenName": "Tae-Kyun",
"surname": "Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2636-2645",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000c626",
"articleId": "17D45Xq6dzm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000c646",
"articleId": "17D45WKWnIc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851d593",
"title": "Robust 3D Hand Pose Estimation in Single Depth Images: From Single-View CNN to Multi-View CNNs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d593/12OmNASraXC",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457f679",
"title": "3D Convolutional Neural Networks for Efficient and Robust Hand Pose Estimation from Single Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f679/12OmNBQC895",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032e913",
"title": "Learning to Estimate 3D Hand Pose from Single RGB Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032e913/12OmNwcl7Bw",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/04/08338122",
"title": "Real-Time 3D Hand Pose Estimation with 3D Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/journal/tp/2019/04/08338122/13rRUx0xPJX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f147",
"title": "Dense 3D Regression for Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f147/17D45WaTkeL",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000i417",
"title": "Hand PointNet: 3D Hand Pose Estimation Using Point Sets",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000i417/17D45XERmmi",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09736619",
"title": "Recurrent 3D Hand Pose Estimation Using Cascaded Pose-Guided 3D Alignments",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09736619/1BN1OeDgXbW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c272",
"title": "Exploiting Spatial-Temporal Relationships for 3D Pose Estimation via Graph Convolutional Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c272/1hVlzcYSNig",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093316",
"title": "3D Hand Pose Estimation with Disentangled Cross-Modal Latent Space",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093316/1jPbFBfZZAI",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700a836",
"title": "MVHM: A Large-Scale Multi-View Hand Mesh Benchmark for Accurate 3D Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700a836/1uqGKiZgPzG",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "18j8Ecq0jn2",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "18j8KfzbuFy",
"doi": "10.1109/WACV.2019.00208",
"title": "Gyroscope-Aided Motion Deblurring with Deep Networks",
"normalizedTitle": "Gyroscope-Aided Motion Deblurring with Deep Networks",
"abstract": "We propose a deblurring method that incorporates gyroscope measurements into a convolutional neural network (CNN). With the help of such measurements, it can handle extremely strong and spatially-variant motion blur. At the same time, the image data is used to overcome the limitations of gyro-based blur estimation. To train our network, we also introduce a novel way of generating realistic training data using the gyroscope. The evaluation shows a clear improvement in visual quality over the state-of-the-art while achieving real-time performance. Furthermore, the method is shown to improve the performance of existing feature detectors and descriptors against the motion blur.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a deblurring method that incorporates gyroscope measurements into a convolutional neural network (CNN). With the help of such measurements, it can handle extremely strong and spatially-variant motion blur. At the same time, the image data is used to overcome the limitations of gyro-based blur estimation. To train our network, we also introduce a novel way of generating realistic training data using the gyroscope. The evaluation shows a clear improvement in visual quality over the state-of-the-art while achieving real-time performance. Furthermore, the method is shown to improve the performance of existing feature detectors and descriptors against the motion blur.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a deblurring method that incorporates gyroscope measurements into a convolutional neural network (CNN). With the help of such measurements, it can handle extremely strong and spatially-variant motion blur. At the same time, the image data is used to overcome the limitations of gyro-based blur estimation. To train our network, we also introduce a novel way of generating realistic training data using the gyroscope. The evaluation shows a clear improvement in visual quality over the state-of-the-art while achieving real-time performance. Furthermore, the method is shown to improve the performance of existing feature detectors and descriptors against the motion blur.",
"fno": "197500b914",
"keywords": [
"Convolutional Neural Nets",
"Gyroscopes",
"Image Motion Analysis",
"Image Restoration",
"Real Time Performance",
"Clear Improvement",
"Realistic Training Data",
"Image Data",
"Spatially Variant Motion Blur",
"CNN",
"Convolutional Neural Network",
"Gyroscope Measurements",
"Deblurring Method",
"Deep Networks",
"Gyroscope Aided Motion Deblurring",
"Cameras",
"Gyroscopes",
"Noise Measurement",
"Visualization",
"Real Time Systems",
"Deconvolution",
"Mobile Handsets"
],
"authors": [
{
"affiliation": null,
"fullName": "Janne Mustaniemi",
"givenName": "Janne",
"surname": "Mustaniemi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Juho Kannala",
"givenName": "Juho",
"surname": "Kannala",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Simo Särkkä",
"givenName": "Simo",
"surname": "Särkkä",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jiri Matas",
"givenName": "Jiri",
"surname": "Matas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Janne Heikkila",
"givenName": "Janne",
"surname": "Heikkila",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-01-01T00:00:00",
"pubType": "proceedings",
"pages": "1914-1922",
"year": "2019",
"issn": "1550-5790",
"isbn": "978-1-7281-1975-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "197500b905",
"articleId": "18j8Jo4EgTK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "197500b923",
"articleId": "18j8GnDjoWs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/taai/2017/4203/0/4203a005",
"title": "Camera Pose Trace Based on Motion Sensor in Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/taai/2017/4203a005/12OmNBWzHRT",
"parentPublication": {
"id": "proceedings/taai/2017/4203/0",
"title": "2017 Conference on Technologies and Applications of Artificial Intelligence (TAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/worv/2013/5646/0/06521916",
"title": "Why would i want a gyroscope on my RGB-D sensor?",
"doi": null,
"abstractUrl": "/proceedings-article/worv/2013/06521916/12OmNBhpRYY",
"parentPublication": {
"id": "proceedings/worv/2013/5646/0",
"title": "2013 IEEE Workshop on Robot Vision (WORV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840b465",
"title": "Forward Motion Deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840b465/12OmNqFJhAs",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761039",
"title": "Improvement of feature matching in catadioptric images using gyroscope data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761039/12OmNroijhB",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034c993",
"title": "Deep Generative Filter for Motion Deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c993/12OmNz4SOxv",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a191",
"title": "Space-Variant Image Deblurring on Smartphones Using Inertial Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a191/12OmNzcPAdQ",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890123",
"title": "Sensor-assisted image deblurring of consumer photos on smartphones",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890123/12OmNzkuKyL",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546041",
"title": "Fast Motion Deblurring for Feature Detection and Matching Using Inertial Measurements",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546041/17D45WXIkDl",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998145",
"title": "3D Hand Tracking in the Presence of Excessive Motion Blur",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998145/1hpPCGSeWXu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifeea/2020/9627/0/962700a112",
"title": "Total angular velocity noise research of fan shaped superfluid interference grating gyroscope",
"doi": null,
"abstractUrl": "/proceedings-article/ifeea/2020/962700a112/1rvCAs4CqBi",
"parentPublication": {
"id": "proceedings/ifeea/2020/9627/0",
"title": "2020 7th International Forum on Electrical Engineering and Automation (IFEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1jAb3RmsU",
"doi": "10.1109/CVPR52688.2022.01291",
"title": "GOAL: Generating 4D Whole-Body Motion for Hand-Object Grasping",
"normalizedTitle": "GOAL: Generating 4D Whole-Body Motion for Hand-Object Grasping",
"abstract": "Generating digital humans that move realistically has many applications and is widely studied, but existing meth-odsfocus on the major limbs of the body, ignoring the hands and head. Hands have been separately studied, but the fo-cus has been on generating realistic static grasps of objects. To synthesize virtual characters that interact with the world, we need to generate full-body motions and realistic hand grasps simultaneously. Both sub-problems are challenging on their own and, together, the state space of poses is sig-nificantly larger, the scales of hand and body motions dif-fer, and the whole-body posture and the hand grasp must agree, satisfy physical constraints, and be plausible. Additionally, the head is involved because the avatar must look at the object to interact with it. For the first time, we ad-dress the problem of generating full-body, hand and head motions of an avatar grasping an unknown object. As in-put, our method, called GOAL, takes a 3D object, its pose, and a starting 3D body pose and shape. GOAL outputs a sequence of whole-body poses using two novel networks. First, GNet generates a goal whole-body grasp with a re-alistic body, head, arm, and hand pose, as well as hand-object contact. Second, MNet generates the motion be-tween the starting and goal pose. This is challenging, as it requires the avatar to walk towards the object with foot-ground contact, orient the head towards it, reach out, and grasp it with a realistic hand pose and hand-object con-tact. To achieve this the networks exploit a representation that combines SMPL-X body parameters and 3D vertex off-sets. We train and evaluate GOAL, both qualitatively and quantitatively, on the GRAB dataset. Results show that GOAL generalizes well to unseen objects, outperforming baselines. A perceptual study shows that GOAL's gener-ated motions approach the realism of GRAB's ground truth. GOAL takes a step towards generating realistic full-body object grasping motion. 
Our models and code are available at https://goal.is.tue.mpg.de.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Generating digital humans that move realistically has many applications and is widely studied, but existing meth-odsfocus on the major limbs of the body, ignoring the hands and head. Hands have been separately studied, but the fo-cus has been on generating realistic static grasps of objects. To synthesize virtual characters that interact with the world, we need to generate full-body motions and realistic hand grasps simultaneously. Both sub-problems are challenging on their own and, together, the state space of poses is sig-nificantly larger, the scales of hand and body motions dif-fer, and the whole-body posture and the hand grasp must agree, satisfy physical constraints, and be plausible. Additionally, the head is involved because the avatar must look at the object to interact with it. For the first time, we ad-dress the problem of generating full-body, hand and head motions of an avatar grasping an unknown object. As in-put, our method, called GOAL, takes a 3D object, its pose, and a starting 3D body pose and shape. GOAL outputs a sequence of whole-body poses using two novel networks. First, GNet generates a goal whole-body grasp with a re-alistic body, head, arm, and hand pose, as well as hand-object contact. Second, MNet generates the motion be-tween the starting and goal pose. This is challenging, as it requires the avatar to walk towards the object with foot-ground contact, orient the head towards it, reach out, and grasp it with a realistic hand pose and hand-object con-tact. To achieve this the networks exploit a representation that combines SMPL-X body parameters and 3D vertex off-sets. We train and evaluate GOAL, both qualitatively and quantitatively, on the GRAB dataset. Results show that GOAL generalizes well to unseen objects, outperforming baselines. A perceptual study shows that GOAL's gener-ated motions approach the realism of GRAB's ground truth. GOAL takes a step towards generating realistic full-body object grasping motion. 
Our models and code are available at https://goal.is.tue.mpg.de.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Generating digital humans that move realistically has many applications and is widely studied, but existing meth-odsfocus on the major limbs of the body, ignoring the hands and head. Hands have been separately studied, but the fo-cus has been on generating realistic static grasps of objects. To synthesize virtual characters that interact with the world, we need to generate full-body motions and realistic hand grasps simultaneously. Both sub-problems are challenging on their own and, together, the state space of poses is sig-nificantly larger, the scales of hand and body motions dif-fer, and the whole-body posture and the hand grasp must agree, satisfy physical constraints, and be plausible. Additionally, the head is involved because the avatar must look at the object to interact with it. For the first time, we ad-dress the problem of generating full-body, hand and head motions of an avatar grasping an unknown object. As in-put, our method, called GOAL, takes a 3D object, its pose, and a starting 3D body pose and shape. GOAL outputs a sequence of whole-body poses using two novel networks. First, GNet generates a goal whole-body grasp with a re-alistic body, head, arm, and hand pose, as well as hand-object contact. Second, MNet generates the motion be-tween the starting and goal pose. This is challenging, as it requires the avatar to walk towards the object with foot-ground contact, orient the head towards it, reach out, and grasp it with a realistic hand pose and hand-object con-tact. To achieve this the networks exploit a representation that combines SMPL-X body parameters and 3D vertex off-sets. We train and evaluate GOAL, both qualitatively and quantitatively, on the GRAB dataset. Results show that GOAL generalizes well to unseen objects, outperforming baselines. A perceptual study shows that GOAL's gener-ated motions approach the realism of GRAB's ground truth. 
GOAL takes a step towards generating realistic full-body object grasping motion. Our models and code are available at https://goal.is.tue.mpg.de.",
"fno": "694600n3253",
"keywords": [
"Avatars",
"Computer Animation",
"Dexterous Manipulators",
"Gait Analysis",
"Grippers",
"Humanoid Robots",
"Image Motion Analysis",
"Learning Artificial Intelligence",
"Motion Control",
"Pose Estimation",
"Shape Recognition",
"Solid Modelling",
"Statistical Analysis",
"Virtual Reality",
"Whole Body Posture",
"Avatar",
"Head Motions",
"Unknown Object",
"Called GOAL",
"Whole Body Poses",
"Re Alistic Body",
"Hand Object Contact",
"Realistic Hand Pose",
"Unseen Objects",
"GOA Ls Gener Ated Motions",
"Realistic Full Body Object Grasping Motion",
"Whole Body Motion",
"Hand Object Grasping",
"Existing Meth Odsfocus",
"Full Body Motions",
"Realistic Hand Grasps",
"Body Motions Dif Fer",
"Solid Modeling",
"Computer Vision",
"Three Dimensional Displays",
"Codes",
"Tracking",
"Shape",
"Avatars"
],
"authors": [
{
"affiliation": "Max Planck Institute for Intelligent Systems,Tubingen,Germany",
"fullName": "Omid Taheri",
"givenName": "Omid",
"surname": "Taheri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Institute for Intelligent Systems,Tubingen,Germany",
"fullName": "Vasileios Choutas",
"givenName": "Vasileios",
"surname": "Choutas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Institute for Intelligent Systems,Tubingen,Germany",
"fullName": "Michael J. Black",
"givenName": "Michael J.",
"surname": "Black",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Institute for Intelligent Systems,Tubingen,Germany",
"fullName": "Dimitrios Tzionas",
"givenName": "Dimitrios",
"surname": "Tzionas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "13253-13263",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H1jA7HNo7C",
"name": "pcvpr202269460-09878822s1-mm_694600n3253.zip",
"size": "2.43 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878822s1-mm_694600n3253.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600n3243",
"articleId": "1H1luesFPiw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600n3264",
"articleId": "1H1mTqlw5pe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892339",
"title": "Simulating anthropomorphic upper body actions in virtual reality using head and hand motion data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892339/12OmNCmGO1G",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2012/1204/0/06184182",
"title": "Visual interpenetration tradeoffs in whole-hand virtual grasping",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2012/06184182/12OmNvA1hj6",
"parentPublication": {
"id": "proceedings/3dui/2012/1204/0",
"title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549373",
"title": "Integrating head and full-body tracking for embodiment in virtual characters",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549373/12OmNx0RIVC",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130392",
"title": "Towards robust cross-user hand tracking and shape recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130392/12OmNzsJ7wQ",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900c307",
"title": "Accurate 3D Hand Pose Estimation for Whole-Body 3D Human Mesh Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900c307/1G56n5brhQI",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10066837",
"title": "VR-HandNet: A Visually and Physically Plausible Hand Manipulation System in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10066837/1LtR7JYxVEk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797884",
"title": "Distributed, Collaborative Virtual Reality Application for Product Development with Simple Avatar Calibration Method",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797884/1cJ0TJmlU9q",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797787",
"title": "The Effect of Hand Size and Interaction Modality on the Virtual Hand Illusion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797787/1cJ179JUrPa",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/07/08952604",
"title": "Effect of Avatar Appearance on Detection Thresholds for Remapped Hand Movements",
"doi": null,
"abstractUrl": "/journal/tg/2021/07/08952604/1gqqhpSZ19S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382876",
"title": "Evidence of Racial Bias Using Immersive Virtual Reality: Analysis of Head and Hand Motions During Shooting Decisions",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382876/1saZsrqdHJm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1i5mkDyiIUg",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1i5mvFudr68",
"doi": "10.1109/ICCVW.2019.00348",
"title": "3D Hand Pose Estimation from RGB Using Privileged Learning with Depth Data",
"normalizedTitle": "3D Hand Pose Estimation from RGB Using Privileged Learning with Depth Data",
"abstract": "This paper proposes a method for 3D hand pose estimation given a large dataset of depth images with joint annotations, and a smaller dataset of depth and RGB image pairs with joint annotations. We explore different ways of using the depth data at the training stage to improve the pose estimation accuracy of a network that only takes RGB images as input. By using paired RGB and depth images, we are able to supervise the RGB-based network to learn middle layer features that mimic that of a network trained on largescale, accurately annotated depth data. Further, depth data provides accurate foreground masks, which are employed to learn better feature activations in the RGB network. During testing, when only RGB images are available, our method produces accurate 3D hand pose predictions. The method is also shown to perform well on the 2D hand pose estimation task. We validate the approach on three public datasets, and compare it to other published methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a method for 3D hand pose estimation given a large dataset of depth images with joint annotations, and a smaller dataset of depth and RGB image pairs with joint annotations. We explore different ways of using the depth data at the training stage to improve the pose estimation accuracy of a network that only takes RGB images as input. By using paired RGB and depth images, we are able to supervise the RGB-based network to learn middle layer features that mimic that of a network trained on largescale, accurately annotated depth data. Further, depth data provides accurate foreground masks, which are employed to learn better feature activations in the RGB network. During testing, when only RGB images are available, our method produces accurate 3D hand pose predictions. The method is also shown to perform well on the 2D hand pose estimation task. We validate the approach on three public datasets, and compare it to other published methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a method for 3D hand pose estimation given a large dataset of depth images with joint annotations, and a smaller dataset of depth and RGB image pairs with joint annotations. We explore different ways of using the depth data at the training stage to improve the pose estimation accuracy of a network that only takes RGB images as input. By using paired RGB and depth images, we are able to supervise the RGB-based network to learn middle layer features that mimic that of a network trained on largescale, accurately annotated depth data. Further, depth data provides accurate foreground masks, which are employed to learn better feature activations in the RGB network. During testing, when only RGB images are available, our method produces accurate 3D hand pose predictions. The method is also shown to perform well on the 2D hand pose estimation task. We validate the approach on three public datasets, and compare it to other published methods.",
"fno": "502300c866",
"keywords": [
"Feature Extraction",
"Image Colour Analysis",
"Image Segmentation",
"Learning Artificial Intelligence",
"Pose Estimation",
"Depth Data",
"Pose Estimation Accuracy",
"RGB Images",
"Paired RGB",
"Depth Images",
"RGB Based Network",
"3 D Hand Pose Estimation",
"Accurate 3 D Hand Pose Predictions",
"Three Dimensional Displays",
"Pose Estimation",
"Training",
"Two Dimensional Displays",
"Solid Modeling",
"Task Analysis",
"Testing",
"Hand Pose Estimation",
"RGB",
"Privileged Learning"
],
"authors": [
{
"affiliation": "Huawei Noah's Ark Lab",
"fullName": "Shanxin Yuan",
"givenName": "Shanxin",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Rakuten Institute of Technology",
"fullName": "Bjorn Stenger",
"givenName": "Bjorn",
"surname": "Stenger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College London",
"fullName": "Tae-Kyun Kim",
"givenName": "Tae-Kyun",
"surname": "Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "2866-2873",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-5023-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "502300c856",
"articleId": "1i5mqmANEVq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "502300c874",
"articleId": "1i5mB0v3uuI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457f679",
"title": "3D Convolutional Neural Networks for Efficient and Robust Hand Pose Estimation from Single Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f679/12OmNBQC895",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032e913",
"title": "Learning to Estimate 3D Hand Pose from Single RGB Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032e913/12OmNwcl7Bw",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000b188",
"title": "Monocular RGB Hand Pose Inference from Unsupervised Refinable Nets",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000b188/17D45VObpN7",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c636",
"title": "Depth-Based 3D Hand Pose Estimation: From Current Achievements to Future Goals",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c636/17D45W2Wyyl",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a089",
"title": "Cross-Modal Deep Variational Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a089/17D45Xh13pi",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300i966",
"title": "Learning Local RGB-to-CAD Correspondences for Object Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300i966/1hVl6ZBSX4s",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/11/09091090",
"title": "3D Hand Pose Estimation Using Synthetic Data and Weakly Labeled RGB Images",
"doi": null,
"abstractUrl": "/journal/tp/2021/11/09091090/1jLOJKgqXGU",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093316",
"title": "3D Hand Pose Estimation with Disentangled Cross-Modal Latent Space",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093316/1jPbFBfZZAI",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093380",
"title": "DGGAN: Depth-image Guided Generative Adversarial Networks for Disentangling RGB and Depth Images in 3D Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093380/1jPbiSrXs64",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700c372",
"title": "Two-hand Global 3D Pose Estimation using Monocular RGB",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700c372/1uqGs9R20ww",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxbmSBT",
"doi": "10.1109/ICCV.2017.341",
"title": "Monocular Free-Head 3D Gaze Tracking with Deep Learning and Geometry Constraints",
"normalizedTitle": "Monocular Free-Head 3D Gaze Tracking with Deep Learning and Geometry Constraints",
"abstract": "Free-head 3D gaze tracking outputs both the eye location and the gaze vector in 3D space, and it has wide applications in scenarios such as driver monitoring, advertisement analysis and surveillance. A reliable and low-cost monocular solution is critical for pervasive usage in these areas. Noticing that a gaze vector is a composition of head pose and eyeball movement in a geometrically deterministic way, we propose a novel gaze transform layer to connect separate head pose and eyeball movement models. The proposed decomposition does not suffer from head-gaze correlation overfitting and makes it possible to use datasets existing for other tasks. To add stronger supervision for better network training, we propose a two-step training strategy, which first trains sub-tasks with rough labels and then jointly trains with accurate gaze labels. To enable good cross-subject performance under various conditions, we collect a large dataset which has full coverage of head poses and eyeball movements, contains 200 subjects, and has diverse illumination conditions. Our deep solution achieves state-of-the-art gaze tracking accuracy, reaching 5.6° cross-subject prediction error using a small network running at 1000 fps on a single CPU (excluding face alignment time) and 4.3° cross-subject error with a deeper network.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Free-head 3D gaze tracking outputs both the eye location and the gaze vector in 3D space, and it has wide applications in scenarios such as driver monitoring, advertisement analysis and surveillance. A reliable and low-cost monocular solution is critical for pervasive usage in these areas. Noticing that a gaze vector is a composition of head pose and eyeball movement in a geometrically deterministic way, we propose a novel gaze transform layer to connect separate head pose and eyeball movement models. The proposed decomposition does not suffer from head-gaze correlation overfitting and makes it possible to use datasets existing for other tasks. To add stronger supervision for better network training, we propose a two-step training strategy, which first trains sub-tasks with rough labels and then jointly trains with accurate gaze labels. To enable good cross-subject performance under various conditions, we collect a large dataset which has full coverage of head poses and eyeball movements, contains 200 subjects, and has diverse illumination conditions. Our deep solution achieves state-of-the-art gaze tracking accuracy, reaching 5.6° cross-subject prediction error using a small network running at 1000 fps on a single CPU (excluding face alignment time) and 4.3° cross-subject error with a deeper network.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Free-head 3D gaze tracking outputs both the eye location and the gaze vector in 3D space, and it has wide applications in scenarios such as driver monitoring, advertisement analysis and surveillance. A reliable and low-cost monocular solution is critical for pervasive usage in these areas. Noticing that a gaze vector is a composition of head pose and eyeball movement in a geometrically deterministic way, we propose a novel gaze transform layer to connect separate head pose and eyeball movement models. The proposed decomposition does not suffer from head-gaze correlation overfitting and makes it possible to use datasets existing for other tasks. To add stronger supervision for better network training, we propose a two-step training strategy, which first trains sub-tasks with rough labels and then jointly trains with accurate gaze labels. To enable good cross-subject performance under various conditions, we collect a large dataset which has full coverage of head poses and eyeball movements, contains 200 subjects, and has diverse illumination conditions. Our deep solution achieves state-of-the-art gaze tracking accuracy, reaching 5.6° cross-subject prediction error using a small network running at 1000 fps on a single CPU (excluding face alignment time) and 4.3° cross-subject error with a deeper network.",
"fno": "1032d162",
"keywords": [
"Eye",
"Feature Extraction",
"Gaze Tracking",
"Image Motion Analysis",
"Learning Artificial Intelligence",
"Pose Estimation",
"Monocular Free Head 3 D",
"Geometry Constraints",
"Eye Location",
"Gaze Vector",
"Wide Applications",
"Driver Monitoring",
"Advertisement Analysis",
"Reliable Cost Monocular Solution",
"Low Cost Monocular Solution",
"Pervasive Usage",
"Eyeball Movement Models",
"Head Gaze Correlation",
"Network Training",
"Two Step Training Strategy",
"Trains Sub Tasks",
"Accurate Gaze Labels",
"Good Cross Subject Performance",
"Head Poses",
"State Of The Art Gaze",
"Cross Subject Prediction Error",
"Video Surveillance",
"CPU",
"Head",
"Gaze Tracking",
"Training",
"Three Dimensional Displays",
"Correlation",
"Predictive Models",
"Transforms"
],
"authors": [
{
"affiliation": null,
"fullName": "Haoping Deng",
"givenName": "Haoping",
"surname": "Deng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wangjiang Zhu",
"givenName": "Wangjiang",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "3162-3171",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032d152",
"articleId": "12OmNx0A7Ez",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032d172",
"articleId": "12OmNywxlT5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/motion/2002/1860/0/18600125",
"title": "Comparative Study of Coarse Head Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/motion/2002/18600125/12OmNAGw13Q",
"parentPublication": {
"id": "proceedings/motion/2002/1860/0",
"title": "Proceedings Workshop on Motion and Video Computing (MOTION 2002)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2018/2335/0/233501a789",
"title": "Human Computer Interaction with Head Pose, Eye Gaze and Body Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a789/12OmNASILS4",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d870",
"title": "Visual Gaze Estimation by Joint Head and Eye Information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d870/12OmNyRg4Cq",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b869",
"title": "Appearance-Based Gaze Tracking with Free Head Movement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b869/12OmNyo1nKa",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a606",
"title": "Eye-Model-Based Gaze Estimation by RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a606/12OmNyqiaTI",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2017/4941/0/07912208",
"title": "Gaze Estimation Based on Eyeball-Head Dynamics",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2017/07912208/12OmNzWfoVQ",
"parentPublication": {
"id": "proceedings/wacvw/2017/4941/0",
"title": "2017 IEEE Winter Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c237",
"title": "Light-Weight Head Pose Invariant Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c237/17D45WXIkI8",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09706357",
"title": "Towards High Performance Low Complexity Calibration in Appearance Based Gaze Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09706357/1AO2a7pgNPO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a554",
"title": "Estimating Gaze From Head and Hand Pose and Scene Images for Open-Ended Exploration in VR Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a554/1tnY5akLwvS",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxqNN9Xqw",
"doi": "10.1109/VRW50115.2020.00123",
"title": "Gaze Analysis and Prediction in Virtual Reality",
"normalizedTitle": "Gaze Analysis and Prediction in Virtual Reality",
"abstract": "In virtual reality (VR) systems, users’ gaze information has gained importance in recent years. It can be applied to many aspects, including VR content design, eye-movement based interaction, gaze-contingent rendering, etc. In this context, it becomes increasingly important to understand users’ gaze behaviors in virtual reality and to predict users’ gaze positions. This paper presents research in gaze behavior analysis and gaze position prediction in virtual reality. Specifically, this paper focuses on static virtual scenes and dynamic virtual scenes under free-viewing conditions. Users’ gaze data in virtual scenes are collected and statistical analysis is performed on the recorded data. The analysis reveals that users’ gaze positions are correlated with their head rotation velocities and the salient regions of the content. In dynamic scenes, users’ gaze positions also have strong correlations with the positions of dynamic objects. A data-driven eye-head coordination model is proposed for realtime gaze prediction in static scenes and a CNN-based model is derived for predicting gaze positions in dynamic scenes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In virtual reality (VR) systems, users’ gaze information has gained importance in recent years. It can be applied to many aspects, including VR content design, eye-movement based interaction, gaze-contingent rendering, etc. In this context, it becomes increasingly important to understand users’ gaze behaviors in virtual reality and to predict users’ gaze positions. This paper presents research in gaze behavior analysis and gaze position prediction in virtual reality. Specifically, this paper focuses on static virtual scenes and dynamic virtual scenes under free-viewing conditions. Users’ gaze data in virtual scenes are collected and statistical analysis is performed on the recorded data. The analysis reveals that users’ gaze positions are correlated with their head rotation velocities and the salient regions of the content. In dynamic scenes, users’ gaze positions also have strong correlations with the positions of dynamic objects. A data-driven eye-head coordination model is proposed for realtime gaze prediction in static scenes and a CNN-based model is derived for predicting gaze positions in dynamic scenes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In virtual reality (VR) systems, users’ gaze information has gained importance in recent years. It can be applied to many aspects, including VR content design, eye-movement based interaction, gaze-contingent rendering, etc. In this context, it becomes increasingly important to understand users’ gaze behaviors in virtual reality and to predict users’ gaze positions. This paper presents research in gaze behavior analysis and gaze position prediction in virtual reality. Specifically, this paper focuses on static virtual scenes and dynamic virtual scenes under free-viewing conditions. Users’ gaze data in virtual scenes are collected and statistical analysis is performed on the recorded data. The analysis reveals that users’ gaze positions are correlated with their head rotation velocities and the salient regions of the content. In dynamic scenes, users’ gaze positions also have strong correlations with the positions of dynamic objects. A data-driven eye-head coordination model is proposed for realtime gaze prediction in static scenes and a CNN-based model is derived for predicting gaze positions in dynamic scenes.",
"fno": "09090417",
"keywords": [
"Solid Modeling",
"Predictive Models",
"Virtual Reality",
"Task Analysis",
"Analytical Models",
"Visualization",
"Correlation",
"Gaze Analysis",
"Gaze Prediction",
"Eye Tracking",
"Visual Saliency",
"Saliency Prediction",
"Convolutional Neural Network CNN",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Peking University",
"fullName": "Zhiming Hu",
"givenName": "Zhiming",
"surname": "Hu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "543-544",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090604",
"articleId": "1jIxi1ubEcg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090624",
"articleId": "1jIxyeqrhrG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2022/9617/0/961700a832",
"title": "GazeDock: Gaze-Only Menu Selection in Virtual Reality using Auto-Triggering Peripheral Menu",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a832/1CJbR6qnKdW",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a281",
"title": "The Stare-in-the-Crowd Effect in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a281/1CJbRovjGSI",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a562",
"title": "Gaze Capture based Considerate Behaviour Control of Virtual Guiding Agent",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a562/1CJfoWhFCXm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798204",
"title": "Studying Gaze Behaviour during Collision Avoidance with a Virtual Walker: Influence of the Virtual Reality Setup",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798204/1cJ11rHzFi8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998375",
"title": "DGaze: CNN-Based Gaze Prediction in Dynamic Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998375/1hpPBdSWXTi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089474",
"title": "A Comparative Analysis of 3D User Interaction: How to Move Virtual Objects in Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089474/1jIx8JYL1wk",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089637",
"title": "Eye-Gaze Activity in Crowds: Impact of Virtual Reality and Density",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089637/1jIx9WIWd5C",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089592",
"title": "Evaluating Virtual Reality Experiences Through Participant Choices",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089592/1jIxc1ZeLza",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09199574",
"title": "Gaze-Dependent Simulation of Light Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09199574/1ncgnMqzLJm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a707",
"title": "[DC] Eye Fixation Forecasting in Task-Oriented Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a707/1tnWQmeJsZi",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tTtxY6uPmM",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"acronym": "percom",
"groupId": "1000551",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tTtABGgVJS",
"doi": "10.1109/PERCOM50583.2021.9439113",
"title": "GAZEL: Runtime Gaze Tracking for Smartphones",
"normalizedTitle": "GAZEL: Runtime Gaze Tracking for Smartphones",
"abstract": "Although work has been conducted on smartphone gaze tracking, the existing techniques are not pervasively used because of their heavy weight and low accuracy. Our preliminary analysis shows that these techniques would work better if their models were trained with data from tablets which have large screens. In this paper, we propose GAZEL, a runtime smartphone gaze-tracking scheme that achieves high accuracy on real devices. The key idea of GAZEL, a tablet-to-smartphone transfer learning, is to train a CNN model with data collected from tablets and then transplant the model to a smartphone. To achieve the goal, we designed a new CNN-based model architecture that is head pose resilient and light enough to operate at runtime. We also exploit implicit calibration to alleviate errors caused by differences in users' visual and device characteristics. The experiment results with commercial smartphones show that GAZEL achieves 27.5% better accuracy on smartphones compared to the state-of-the-art techniques and provides gaze tracking at up to 18 fps which is practically usable at runtime.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although work has been conducted on smartphone gaze tracking, the existing techniques are not pervasively used because of their heavy weight and low accuracy. Our preliminary analysis shows that these techniques would work better if their models were trained with data from tablets which have large screens. In this paper, we propose GAZEL, a runtime smartphone gaze-tracking scheme that achieves high accuracy on real devices. The key idea of GAZEL, a tablet-to-smartphone transfer learning, is to train a CNN model with data collected from tablets and then transplant the model to a smartphone. To achieve the goal, we designed a new CNN-based model architecture that is head pose resilient and light enough to operate at runtime. We also exploit implicit calibration to alleviate errors caused by differences in users' visual and device characteristics. The experiment results with commercial smartphones show that GAZEL achieves 27.5% better accuracy on smartphones compared to the state-of-the-art techniques and provides gaze tracking at up to 18 fps which is practically usable at runtime.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although work has been conducted on smartphone gaze tracking, the existing techniques are not pervasively used because of their heavy weight and low accuracy. Our preliminary analysis shows that these techniques would work better if their models were trained with data from tablets which have large screens. In this paper, we propose GAZEL, a runtime smartphone gaze-tracking scheme that achieves high accuracy on real devices. The key idea of GAZEL, a tablet-to-smartphone transfer learning, is to train a CNN model with data collected from tablets and then transplant the model to a smartphone. To achieve the goal, we designed a new CNN-based model architecture that is head pose resilient and light enough to operate at runtime. We also exploit implicit calibration to alleviate errors caused by differences in users' visual and device characteristics. The experiment results with commercial smartphones show that GAZEL achieves 27.5% better accuracy on smartphones compared to the state-of-the-art techniques and provides gaze tracking at up to 18 fps which is practically usable at runtime.",
"fno": "09439113",
"keywords": [
"Calibration",
"Cellular Neural Nets",
"Learning Artificial Intelligence",
"Mobile Computing",
"Smart Phones",
"GAZEL",
"Runtime Gaze Tracking",
"Smartphone Gaze Tracking",
"Heavy Weight",
"Tablets",
"Runtime Smartphone Gaze Tracking Scheme",
"Tablet To Smartphone Transfer Learning",
"CNN Model",
"CNN Based Model Architecture",
"Commercial Smartphones",
"Pervasive Computing",
"Visualization",
"Runtime",
"Transfer Learning",
"Gaze Tracking",
"Computer Architecture",
"Data Models",
"Runtime",
"Gaze Tracking",
"Mobile",
"Smartphone",
"Transfer Learning",
"Head Pose"
],
"authors": [
{
"affiliation": "Yonsei University,Department of Computer Science,Seoul,Republic of Korea",
"fullName": "Joonbeom Park",
"givenName": "Joonbeom",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University,Department of Computer Science,Seoul,Republic of Korea",
"fullName": "Seonghoon Park",
"givenName": "Seonghoon",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University,Department of Computer Science,Seoul,Republic of Korea",
"fullName": "Hojung Cha",
"givenName": "Hojung",
"surname": "Cha",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "percom",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-10",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0418-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09439128",
"articleId": "1tTtycpfHIk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09439125",
"articleId": "1tTtyVVjGTK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmi/2002/1834/0/18340261",
"title": "Active Gaze Tracking for Human-Robot Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/icmi/2002/18340261/12OmNAGNCeq",
"parentPublication": {
"id": "proceedings/icmi/2002/1834/0",
"title": "Proceedings Fourth IEEE International Conference on Multimodal Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2010/4249/0/4249a048",
"title": "Implementation and Optimization of the Eye Gaze Tracking System Based on DM642",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2010/4249a048/12OmNs4S8I4",
"parentPublication": {
"id": "proceedings/icinis/2010/4249/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoip/2010/4252/1/4252a131",
"title": "A Simplified 3D Gaze Tracking Technology with Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/icoip/2010/4252a131/12OmNwqft0F",
"parentPublication": {
"id": "proceedings/icoip/2010/4252/2",
"title": "Optoelectronics and Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmsp/2011/4356/1/4356a261",
"title": "Object Recognition and Selection Method by Gaze Tracking and SURF Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/cmsp/2011/4356a261/12OmNx4yvEY",
"parentPublication": {
"id": "proceedings/cmsp/2011/4356/1",
"title": "Multimedia and Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2011/1451/0/06000327",
"title": "Gaze tracking as a game input interface",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2011/06000327/12OmNxRWIeo",
"parentPublication": {
"id": "proceedings/cgames/2011/1451/0",
"title": "2011 16th International Conference on Computer Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wkdd/2009/3543/0/3543a594",
"title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn",
"parentPublication": {
"id": "proceedings/wkdd/2009/3543/0",
"title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. WKDD 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c237",
"title": "Light-Weight Head Pose Invariant Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c237/17D45WXIkI8",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09802919",
"title": "Continuous Gaze Tracking With Implicit Saliency-Aware Calibration on Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09802919/1Eo1vvDggH6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09389490",
"title": "Event-Based Near-Eye Gaze Tracking Beyond 10,000 Hz",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09389490/1smZT5W55V6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412205",
"title": "Adaptive Feature Fusion Network for Gaze Tracking in Mobile Tablets",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412205/1tmjcNMinsc",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrAMEOd",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"acronym": "csci",
"groupId": "1803739",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvSbBHM",
"doi": "10.1109/CSCI.2016.0034",
"title": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip",
"normalizedTitle": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip",
"abstract": "Hearing loss affects hearing impairment at diversely different frequencies, so it is crucial that a good hearing aid should have high frequency resolution. We have developed a 64 channel hearing aid which provides precise control of 125Hz resolution over 8000Hz frequency spectrum with non-linear compression. Probabilistic noise reduction algorithm and feedback cancellation based on frequency domain analysis were developed, which enhances speech clarity while minimizing undesirable ambient noises. The hearing aid can also be controlled and calibrated wirelessly via Bluetooth.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Hearing loss affects hearing impairment at diversely different frequencies, so it is crucial that a good hearing aid should have high frequency resolution. We have developed a 64 channel hearing aid which provides precise control of 125Hz resolution over 8000Hz frequency spectrum with non-linear compression. Probabilistic noise reduction algorithm and feedback cancellation based on frequency domain analysis were developed, which enhances speech clarity while minimizing undesirable ambient noises. The hearing aid can also be controlled and calibrated wirelessly via Bluetooth.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Hearing loss affects hearing impairment at diversely different frequencies, so it is crucial that a good hearing aid should have high frequency resolution. We have developed a 64 channel hearing aid which provides precise control of 125Hz resolution over 8000Hz frequency spectrum with non-linear compression. Probabilistic noise reduction algorithm and feedback cancellation based on frequency domain analysis were developed, which enhances speech clarity while minimizing undesirable ambient noises. The hearing aid can also be controlled and calibrated wirelessly via Bluetooth.",
"fno": "07881327",
"keywords": [
"Bluetooth",
"Hearing Aids",
"Noise Abatement",
"Speech Intelligibility",
"Hearing Aid Development",
"CSR 8675 Bluetooth Chip",
"Hearing Loss",
"Hearing Impairment",
"Nonlinear Compression",
"Probabilistic Noise Reduction Algorithm",
"Feedback Cancellation",
"Frequency Domain Analysis",
"Speech Clarity",
"Ambient Noises",
"Noise Figure 6 D B",
"Frequency 8000 Hz",
"Speech",
"Auditory System",
"Hearing Aids",
"Noise Reduction",
"Bluetooth",
"Noise Measurement",
"Fitting",
"125 Hz Resolution",
"Non Linear Compression",
"Audiogram",
"64 Channel Spectrum",
"Noise Reduction",
"Feedback Cancellation",
"CSR 8675",
"Bluetooth"
],
"authors": [
{
"affiliation": null,
"fullName": "S.S. Jarng",
"givenName": "S.S.",
"surname": "Jarng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "A. Amatya",
"givenName": "A.",
"surname": "Amatya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Y.J. Kwon",
"givenName": "Y.J.",
"surname": "Kwon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "D.S. Jarng",
"givenName": "D.S.",
"surname": "Jarng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "csci",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-12-01T00:00:00",
"pubType": "proceedings",
"pages": "143-148",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-5510-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07881326",
"articleId": "12OmNBqMDf3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07881328",
"articleId": "12OmNviZlMm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/apscc/2011/4624/0/4624a395",
"title": "64 Channel Digital Hearing Aid Emdedded Firmware",
"doi": null,
"abstractUrl": "/proceedings-article/apscc/2011/4624a395/12OmNAlvHpQ",
"parentPublication": {
"id": "proceedings/apscc/2011/4624/0",
"title": "2011 IEEE Asia -Pacific Services Computing Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itag/2016/3738/0/3738a077",
"title": "User Involvement in Design and Application of Virtual Reality Gamification to Facilitate the Use of Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/itag/2016/3738a077/12OmNCzsKFe",
"parentPublication": {
"id": "proceedings/itag/2016/3738/0",
"title": "2016 International Conference on Interactive Technologies and Games (iTAG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/4/05745533",
"title": "Signal processing, hearing aid design, and the psychoacoustic turing test",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745533/12OmNrAv3Gu",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/4",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00151068",
"title": "A digital filterbank hearing aid-design, implementation and evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00151068/12OmNvAiSpE",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2011/4208/0/05763297",
"title": "A 0.964mW digital hearing aid system",
"doi": null,
"abstractUrl": "/proceedings-article/date/2011/05763297/12OmNzUPpAw",
"parentPublication": {
"id": "proceedings/date/2011/4208/0",
"title": "Design, Automation & Test in Europe Conference & Exhibition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577076",
"title": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577076/17D45VsBU4f",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cncit/2022/5296/0/529600a014",
"title": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cncit/2022/529600a014/1GNuyw8GC76",
"parentPublication": {
"id": "proceedings/cncit/2022/5296/0",
"title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2022/6495/0/649500a308",
"title": "Predicting and Explaining Hearing Aid Usage Using Encoder-Decoder with Attention Mechanism and SHAP",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2022/649500a308/1MeoG3BDZ7y",
"parentPublication": {
"id": "proceedings/sitis/2022/6495/0",
"title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2019/3851/0/385100a307",
"title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data",
"doi": null,
"abstractUrl": "/proceedings-article/services/2019/385100a307/1cTIczQh5vi",
"parentPublication": {
"id": "proceedings/services/2019/3851/2642-939X",
"title": "2019 IEEE World Congress on Services (SERVICES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a241",
"title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a241/1tnYlALODdu",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxWcHee",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"acronym": "icassp",
"groupId": "1000002",
"volume": "4",
"displayVolume": "4",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzUPpsJ",
"doi": "10.1109/ICASSP.2004.1326750",
"title": "A real time implementation and an evaluation of an optimal filtering technique for noise reduction in dual microphone hearing aids",
"normalizedTitle": "A real time implementation and an evaluation of an optimal filtering technique for noise reduction in dual microphone hearing aids",
"abstract": "A real time implementation and an evaluation of a singular value decomposition (SVD) based optimal filtering technique (Doclo, S. and Moonen, M., IEEE Trans. Sig. Process., vol.50, no.9, p.2230-44, 2002) for noise reduction in a dual microphone BTE (behind-the-ear) hearing aid is presented. A method to improve the performance of a voice activity detector (VAD) is described and evaluated physically. This method is used in the real time implementation of the optimal filtering technique. A perceptual evaluation by normal hearing subjects is carried out for single and multiple jammer sound sources with speech weighted noise. The SVD-based technique can perform as well as an adaptive beamformer strategy (Maj, J.B. et al., Ear and Hearing, 2003) in a single noise scenario (i.e. the ideal scenario for the latter technique), and, can outperform the beamformer technique in a multiple noise sources scenario.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A real time implementation and an evaluation of a singular value decomposition (SVD) based optimal filtering technique (Doclo, S. and Moonen, M., IEEE Trans. Sig. Process., vol.50, no.9, p.2230-44, 2002) for noise reduction in a dual microphone BTE (behind-the-ear) hearing aid is presented. A method to improve the performance of a voice activity detector (VAD) is described and evaluated physically. This method is used in the real time implementation of the optimal filtering technique. A perceptual evaluation by normal hearing subjects is carried out for single and multiple jammer sound sources with speech weighted noise. The SVD-based technique can perform as well as an adaptive beamformer strategy (Maj, J.B. et al., Ear and Hearing, 2003) in a single noise scenario (i.e. the ideal scenario for the latter technique), and, can outperform the beamformer technique in a multiple noise sources scenario.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A real time implementation and an evaluation of a singular value decomposition (SVD) based optimal filtering technique (Doclo, S. and Moonen, M., IEEE Trans. Sig. Process., vol.50, no.9, p.2230-44, 2002) for noise reduction in a dual microphone BTE (behind-the-ear) hearing aid is presented. A method to improve the performance of a voice activity detector (VAD) is described and evaluated physically. This method is used in the real time implementation of the optimal filtering technique. A perceptual evaluation by normal hearing subjects is carried out for single and multiple jammer sound sources with speech weighted noise. The SVD-based technique can perform as well as an adaptive beamformer strategy (Maj, J.B. et al., Ear and Hearing, 2003) in a single noise scenario (i.e. the ideal scenario for the latter technique), and, can outperform the beamformer technique in a multiple noise sources scenario.",
"fno": "01326750",
"keywords": [
"Speech Processing",
"Hearing Aids",
"Filtering Theory",
"Acoustic Noise",
"Random Noise",
"Signal Denoising",
"Array Signal Processing",
"Singular Value Decomposition",
"Signal Detection",
"Speech Intelligibility",
"Real Time Implementation",
"Optimal Filtering Technique",
"Noise Reduction",
"Dual Microphone Hearing Aids",
"Singular Value Decomposition",
"SVD",
"Voice Activity Detector",
"VAD",
"Jammer Sound Sources",
"Speech Weighted Noise",
"Speech Intelligibility",
"Behind The Ear Hearing Aid",
"Filtering",
"Noise Reduction",
"Microphones",
"Hearing Aids",
"Auditory System",
"Acoustic Noise",
"Singular Value Decomposition",
"Detectors",
"Jamming",
"Speech Enhancement"
],
"authors": [
{
"affiliation": "Lab.Exp.ORL, Katholieke Univ., Leuven, Belgium",
"fullName": "J.B. Maj",
"givenName": "J.B.",
"surname": "Maj",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lab.Exp.ORL, Katholieke Univ., Leuven, Belgium",
"fullName": "L. Royackers",
"givenName": "L.",
"surname": "Royackers",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lab.Exp.ORL, Katholieke Univ., Leuven, Belgium",
"fullName": "J. Wouters",
"givenName": "J.",
"surname": "Wouters",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icassp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-01-01T00:00:00",
"pubType": "proceedings",
"pages": "iv-9-12 vol.4",
"year": "2004",
"issn": "1520-6149",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01326749",
"articleId": "12OmNxj23k8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01326751",
"articleId": "12OmNylsZWp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/2018/6060/0/606001a223",
"title": "A (Lack of) Review on Cyber-Security and Privacy Concerns in Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2018/606001a223/12OmNBuL175",
"parentPublication": {
"id": "proceedings/cbms/2018/6060/0",
"title": "2018 IEEE 31st International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itag/2016/3738/0/3738a077",
"title": "User Involvement in Design and Application of Virtual Reality Gamification to Facilitate the Use of Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/itag/2016/3738a077/12OmNCzsKFe",
"parentPublication": {
"id": "proceedings/itag/2016/3738/0",
"title": "2016 International Conference on Interactive Technologies and Games (iTAG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1990/2180/1/00523294",
"title": "Constrained optimum filtering for multi-microphone digital hearing aids",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1990/00523294/12OmNqBKTSV",
"parentPublication": {
"id": "proceedings/acssc/1990/2180/2",
"title": "1990 Conference Record Twenty-Fourth Asilomar Conference on Signals, Systems and Computers, 1990.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881327",
"title": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881327/12OmNvSbBHM",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/4/05745535",
"title": "Towards SNR-loss restoration in digital hearing AIDS",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745535/12OmNvSbBpm",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/4",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isai/2016/1585/0/1585a347",
"title": "Speech Recognition and Synthesis Algorithm for Digital Hearing Aids under Background Noise",
"doi": null,
"abstractUrl": "/proceedings-article/isai/2016/1585a347/12OmNx8wTgO",
"parentPublication": {
"id": "proceedings/isai/2016/1585/0",
"title": "2016 International Conference on Information System and Artificial Intelligence (ISAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2012/05/05740627",
"title": "Sub μW Noise Reduction for CIC Hearing Aids",
"doi": null,
"abstractUrl": "/journal/si/2012/05/05740627/13rRUIIVlig",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cncit/2022/5296/0/529600a020",
"title": "A Wind Noise Detection and Suppression Method in Digital Hearing Aid",
"doi": null,
"abstractUrl": "/proceedings-article/cncit/2022/529600a020/1GNuAAgSfsI",
"parentPublication": {
"id": "proceedings/cncit/2022/5296/0",
"title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cncit/2022/5296/0/529600a014",
"title": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cncit/2022/529600a014/1GNuyw8GC76",
"parentPublication": {
"id": "proceedings/cncit/2022/5296/0",
"title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998401",
"title": "Superhuman Hearing - Virtual Prototyping of Artificial Hearing: a Case Study on Interactions and Acoustic Beamforming",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998401/1hrXgAAK6NW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1ftOBDg",
"doi": "10.1109/VR.2018.8446298",
"title": "3D Tune-In: 3D-Games for Tuning and Learning About Hearing Aids",
"normalizedTitle": "3D Tune-In: 3D-Games for Tuning and Learning About Hearing Aids",
"abstract": "3D Tune-In is an EU-funded project which brings together the relevant stakeholders from the videogame industry, academic institutions, a large hearing aid manufacturer, and hearing communities, to produce digital games in the field of hearing aid technologies and hearing loss [2] [3]. The project has now completed the development of the 3D Tune-In Toolkit [1], a flexible, cross-platform library of code and guidelines that gives traditional game and software developers access to high-quality sound spatialisation (both for headphones and loudspeakers), hearing loss and hearing aid simulations. The test application for the Toolkit is currently available for free through the 3D Tune-In project website (http://3d-tune-in.eu/). The C++ code will be released open-source through GitHub in Spring 2018. In addition to the Toolkit, 3D Tune-In has produced 5 different applications aimed at different groups of the hearing impaired and non-hearing impaired communities. The video briefly describes the project context, goals and main outcomes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D Tune-In is an EU-funded project which brings together the relevant stakeholders from the videogame industry, academic institutions, a large hearing aid manufacturer, and hearing communities, to produce digital games in the field of hearing aid technologies and hearing loss [2] [3]. The project has now completed the development of the 3D Tune-In Toolkit [1], a flexible, cross-platform library of code and guidelines that gives traditional game and software developers access to high-quality sound spatialisation (both for headphones and loudspeakers), hearing loss and hearing aid simulations. The test application for the Toolkit is currently available for free through the 3D Tune-In project website (http://3d-tune-in.eu/). The C++ code will be released open-source through GitHub in Spring 2018. In addition to the Toolkit, 3D Tune-In has produced 5 different applications aimed at different groups of the hearing impaired and non-hearing impaired communities. The video briefly describes the project context, goals and main outcomes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D Tune-In is an EU-funded project which brings together the relevant stakeholders from the videogame industry, academic institutions, a large hearing aid manufacturer, and hearing communities, to produce digital games in the field of hearing aid technologies and hearing loss [2] [3]. The project has now completed the development of the 3D Tune-In Toolkit [1], a flexible, cross-platform library of code and guidelines that gives traditional game and software developers access to high-quality sound spatialisation (both for headphones and loudspeakers), hearing loss and hearing aid simulations. The test application for the Toolkit is currently available for free through the 3D Tune-In project website (http://3d-tune-in.eu/). The C++ code will be released open-source through GitHub in Spring 2018. In addition to the Toolkit, 3D Tune-In has produced 5 different applications aimed at different groups of the hearing impaired and non-hearing impaired communities. The video briefly describes the project context, goals and main outcomes.",
"fno": "08446298",
"keywords": [
"Computer Aided Instruction",
"Computer Games",
"Handicapped Aids",
"Hearing Aids",
"Hearing Communities",
"Aid Technologies",
"Hearing Loss",
"Traditional Game",
"Hearing Aid Simulations",
"Hearing Aid Manufacturer",
"Software Developers",
"3 D Tune In",
"3 D Games",
"Hearing Aids Learning",
"Hearing Impaired Communities",
"Three Dimensional Displays",
"Hearing Aids",
"Auditory System",
"Games",
"Tuning",
"Solid Modeling",
"Open Source Software",
"3 D Sound",
"Binaural",
"Ambisonic",
"Serious Games",
"Hearing Aids",
"Hearing Loss Human Centred Computing Audio Feedback",
"Social And Professional Topics Assistive Technologies"
],
"authors": [
{
"affiliation": "Dyson School of Design Engineering - Imperial College London, On behalf of the 3D Tune-In consortium, UK",
"fullName": "Lorenzo Picinali",
"givenName": "Lorenzo",
"surname": "Picinali",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "836-836",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446366",
"articleId": "13bd1AITnaG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446157",
"articleId": "13bd1gQYgEq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/itag/2016/3738/0/3738a077",
"title": "User Involvement in Design and Application of Virtual Reality Gamification to Facilitate the Use of Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/itag/2016/3738a077/12OmNCzsKFe",
"parentPublication": {
"id": "proceedings/itag/2016/3738/0",
"title": "2016 International Conference on Interactive Technologies and Games (iTAG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1990/2180/1/00523294",
"title": "Constrained optimum filtering for multi-microphone digital hearing aids",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1990/00523294/12OmNqBKTSV",
"parentPublication": {
"id": "proceedings/acssc/1990/2180/2",
"title": "1990 Conference Record Twenty-Fourth Asilomar Conference on Signals, Systems and Computers, 1990.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881327",
"title": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881327/12OmNvSbBHM",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2000/0862/0/08620270",
"title": "New Generation Intelligent Hearing Prosthetics",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2000/08620270/12OmNy87Qwi",
"parentPublication": {
"id": "proceedings/bibe/2000/0862/0",
"title": "13th IEEE International Conference on BioInformatics and BioEngineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/an/2011/02/man2011020024",
"title": "Hearing Aids and the History of Electronics Miniaturization",
"doi": null,
"abstractUrl": "/magazine/an/2011/02/man2011020024/13rRUx0xPCe",
"parentPublication": {
"id": "mags/an",
"title": "IEEE Annals of the History of Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577076",
"title": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577076/17D45VsBU4f",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cncit/2022/5296/0/529600a014",
"title": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cncit/2022/529600a014/1GNuyw8GC76",
"parentPublication": {
"id": "proceedings/cncit/2022/5296/0",
"title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2022/6495/0/649500a308",
"title": "Predicting and Explaining Hearing Aid Usage Using Encoder-Decoder with Attention Mechanism and SHAP",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2022/649500a308/1MeoG3BDZ7y",
"parentPublication": {
"id": "proceedings/sitis/2022/6495/0",
"title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2019/3851/0/385100a307",
"title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data",
"doi": null,
"abstractUrl": "/proceedings-article/services/2019/385100a307/1cTIczQh5vi",
"parentPublication": {
"id": "proceedings/services/2019/3851/2642-939X",
"title": "2019 IEEE World Congress on Services (SERVICES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a241",
"title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a241/1tnYlALODdu",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKirz",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"acronym": "sive",
"groupId": "1805064",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45VsBU4f",
"doi": "10.1109/SIVE.2018.8577076",
"title": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations",
"normalizedTitle": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations",
"abstract": "The 3DTI Toolkit is a standard C++ library for audio spatialisation and simulation using loudspeakers or headphones developed within the 3D Tune-In (3DTI) project (http://www.3d-tune-in.eu), which aims at using 3D sound and simulating hearing loss and hearing aids within virtual environments and games. The Toolkit allows the design and rendering of highly realistic and immersive 3D audio, and the simulation of virtual hearing aid devices and of different typologies of hearing loss. The library includes a real-time 3D binaural audio renderer offering full 3D spatialization based on efficient Head Related Transfer Function (HRTF) convolution, including smooth interpolation among impulse responses, customization of listener head radius and specific simulation of far-distance and near-field effects. In addition, spatial reverberation is simulated in real time using a uniformly partitioned convolution with Binaural Room Impulse Responses (BRIRs) employing a virtual Ambisonic approach. The 3D Tune-In Toolkit includes also a loudspeaker-based spatialiser implemented using Ambisonic encoding/decoding. This poster presents a brief overview of the main features of the Toolkit, which is released open-source under GPL v3 license (the code is available in GitHub https://github.com/3DTune-In/3dti_AudioToolkit).",
"abstracts": [
{
"abstractType": "Regular",
"content": "The 3DTI Toolkit is a standard C++ library for audio spatialisation and simulation using loudspeakers or headphones developed within the 3D Tune-In (3DTI) project (http://www.3d-tune-in.eu), which aims at using 3D sound and simulating hearing loss and hearing aids within virtual environments and games. The Toolkit allows the design and rendering of highly realistic and immersive 3D audio, and the simulation of virtual hearing aid devices and of different typologies of hearing loss. The library includes a real-time 3D binaural audio renderer offering full 3D spatialization based on efficient Head Related Transfer Function (HRTF) convolution, including smooth interpolation among impulse responses, customization of listener head radius and specific simulation of far-distance and near-field effects. In addition, spatial reverberation is simulated in real time using a uniformly partitioned convolution with Binaural Room Impulse Responses (BRIRs) employing a virtual Ambisonic approach. The 3D Tune-In Toolkit includes also a loudspeaker-based spatialiser implemented using Ambisonic encoding/decoding. This poster presents a brief overview of the main features of the Toolkit, which is released open-source under GPL v3 license (the code is available in GitHub https://github.com/3DTune-In/3dti_AudioToolkit).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The 3DTI Toolkit is a standard C++ library for audio spatialisation and simulation using loudspeakers or headphones developed within the 3D Tune-In (3DTI) project (http://www.3d-tune-in.eu), which aims at using 3D sound and simulating hearing loss and hearing aids within virtual environments and games. The Toolkit allows the design and rendering of highly realistic and immersive 3D audio, and the simulation of virtual hearing aid devices and of different typologies of hearing loss. The library includes a real-time 3D binaural audio renderer offering full 3D spatialization based on efficient Head Related Transfer Function (HRTF) convolution, including smooth interpolation among impulse responses, customization of listener head radius and specific simulation of far-distance and near-field effects. In addition, spatial reverberation is simulated in real time using a uniformly partitioned convolution with Binaural Room Impulse Responses (BRIRs) employing a virtual Ambisonic approach. The 3D Tune-In Toolkit includes also a loudspeaker-based spatialiser implemented using Ambisonic encoding/decoding. This poster presents a brief overview of the main features of the Toolkit, which is released open-source under GPL v3 license (the code is available in GitHub https://github.com/3DTune-In/3dti_AudioToolkit).",
"fno": "08577076",
"keywords": [
"Auditory System",
"Hearing Aids",
"Three Dimensional Displays",
"Loudspeakers",
"Convolution",
"Libraries",
"Reverberation",
"3 D Sound",
"Binaural",
"Ambisonic",
"Hearing Aid",
"Hearing Loss",
"Virtual Environment",
"Human Centred Computing Audio Feedback",
"Social And Professional Topics Assistive Technologies"
],
"authors": [
{
"affiliation": "DIANA Research Group, Universidad de Malaga",
"fullName": "Maria Cuevas-Rodríguez",
"givenName": "Maria",
"surname": "Cuevas-Rodríguez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "DIANA Research Group, Universidad de Malaga",
"fullName": "Daniel González-Toledo",
"givenName": "Daniel",
"surname": "González-Toledo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "DIANA Research Group, Universidad de Malaga",
"fullName": "Ernesto de la Rubia-Cuestas",
"givenName": "Ernesto de",
"surname": "la Rubia-Cuestas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "DIANA Research Group, Universidad de Malaga",
"fullName": "Carlos Garre",
"givenName": "Carlos",
"surname": "Garre",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "DIANA Research Group, Universidad de Malaga",
"fullName": "Luis Molina-Tanco",
"givenName": "Luis",
"surname": "Molina-Tanco",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "DIANA Research Group, Universidad de Malaga",
"fullName": "Arcadio Reyes-Lecuona",
"givenName": "Arcadio",
"surname": "Reyes-Lecuona",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dyson School of Design Engineering, Imperial College London",
"fullName": "David Poirier-Quinot",
"givenName": "David",
"surname": "Poirier-Quinot",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dyson School of Design Engineering, Imperial College London",
"fullName": "Lorenzo Picinali",
"givenName": "Lorenzo",
"surname": "Picinali",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sive",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-3",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-5713-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08577104",
"articleId": "17D45Xctton",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08577177",
"articleId": "17D45XoXP3w",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/itag/2016/3738/0/3738a077",
"title": "User Involvement in Design and Application of Virtual Reality Gamification to Facilitate the Use of Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/itag/2016/3738a077/12OmNCzsKFe",
"parentPublication": {
"id": "proceedings/itag/2016/3738/0",
"title": "2016 International Conference on Interactive Technologies and Games (iTAG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881327",
"title": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881327/12OmNvSbBHM",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2000/0862/0/08620270",
"title": "New Generation Intelligent Hearing Prosthetics",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2000/08620270/12OmNy87Qwi",
"parentPublication": {
"id": "proceedings/bibe/2000/0862/0",
"title": "13th IEEE International Conference on BioInformatics and BioEngineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2011/4208/0/05763297",
"title": "A 0.964mW digital hearing aid system",
"doi": null,
"abstractUrl": "/proceedings-article/date/2011/05763297/12OmNzUPpAw",
"parentPublication": {
"id": "proceedings/date/2011/4208/0",
"title": "Design, Automation & Test in Europe Conference & Exhibition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446298",
"title": "3D Tune-In: 3D-Games for Tuning and Learning About Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446298/13bd1ftOBDg",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2018/9385/0/938500a296",
"title": "Big Data Analytics in Healthcare: Design and Implementation for a Hearing Aid Case Study",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2018/938500a296/19RSs2R4Q5a",
"parentPublication": {
"id": "proceedings/sitis/2018/9385/0",
"title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cncit/2022/5296/0/529600a014",
"title": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cncit/2022/529600a014/1GNuyw8GC76",
"parentPublication": {
"id": "proceedings/cncit/2022/5296/0",
"title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2022/6495/0/649500a308",
"title": "Predicting and Explaining Hearing Aid Usage Using Encoder-Decoder with Attention Mechanism and SHAP",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2022/649500a308/1MeoG3BDZ7y",
"parentPublication": {
"id": "proceedings/sitis/2022/6495/0",
"title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2019/3851/0/385100a307",
"title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data",
"doi": null,
"abstractUrl": "/proceedings-article/services/2019/385100a307/1cTIczQh5vi",
"parentPublication": {
"id": "proceedings/services/2019/3851/2642-939X",
"title": "2019 IEEE World Congress on Services (SERVICES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a241",
"title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a241/1tnYlALODdu",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "19RSpV3wvxS",
"title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"acronym": "sitis",
"groupId": "1002425",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19RSs2R4Q5a",
"doi": "10.1109/SITIS.2018.00052",
"title": "Big Data Analytics in Healthcare: Design and Implementation for a Hearing Aid Case Study",
"normalizedTitle": "Big Data Analytics in Healthcare: Design and Implementation for a Hearing Aid Case Study",
"abstract": "Modern hearing aids (HAs) are not simple passive sound enhancers, but rather complex devices that can log (via smart-phones) multivariate real-time data from the acoustic environment of a user. In the EVOTION project (www.h2020evotion.eu) such hearing aids are integrated with a Big Data analytics (BDA) platform to bring about ecologically valid evidence for policy-making within the hearing healthcare sector. Here, we present the background of the BDA platform and a concrete case study of how longitudinally sampled data from HAs can 1) support hypotheses about HA usage prognosis, and 2) bring new knowledge of how HAs are used across a typical day. In five participants, we found that the hourly HA usage was negatively associated with both the mean and the variance of the signal-to-noise ratio, and that increases in the daily total HA usage were associated with higher and more diverse sound levels.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Modern hearing aids (HAs) are not simple passive sound enhancers, but rather complex devices that can log (via smart-phones) multivariate real-time data from the acoustic environment of a user. In the EVOTION project (www.h2020evotion.eu) such hearing aids are integrated with a Big Data analytics (BDA) platform to bring about ecologically valid evidence for policy-making within the hearing healthcare sector. Here, we present the background of the BDA platform and a concrete case study of how longitudinally sampled data from HAs can 1) support hypotheses about HA usage prognosis, and 2) bring new knowledge of how HAs are used across a typical day. In five participants, we found that the hourly HA usage was negatively associated with both the mean and the variance of the signal-to-noise ratio, and that increases in the daily total HA usage were associated with higher and more diverse sound levels.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Modern hearing aids (HAs) are not simple passive sound enhancers, but rather complex devices that can log (via smart-phones) multivariate real-time data from the acoustic environment of a user. In the EVOTION project (www.h2020evotion.eu) such hearing aids are integrated with a Big Data analytics (BDA) platform to bring about ecologically valid evidence for policy-making within the hearing healthcare sector. Here, we present the background of the BDA platform and a concrete case study of how longitudinally sampled data from HAs can 1) support hypotheses about HA usage prognosis, and 2) bring new knowledge of how HAs are used across a typical day. In five participants, we found that the hourly HA usage was negatively associated with both the mean and the variance of the signal-to-noise ratio, and that increases in the daily total HA usage were associated with higher and more diverse sound levels.",
"fno": "938500a296",
"keywords": [
"Task Analysis",
"Big Data",
"Hearing Aids",
"Auditory System",
"Data Analysis",
"Tools",
"Hearing Aids",
"Big Data Analytics",
"Mixed Models",
"Multilevel Clustered Data",
"Evidence Based Public Health Policies"
],
"authors": [
{
"affiliation": null,
"fullName": "Jeppe Høy Christensen",
"givenName": "Jeppe Høy",
"surname": "Christensen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Michael Kai Petersen",
"givenName": "Michael Kai",
"surname": "Petersen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Niels Henrik Pontoppidan",
"givenName": "Niels Henrik",
"surname": "Pontoppidan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Marco Cremonini",
"givenName": "Marco",
"surname": "Cremonini",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sitis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-11-01T00:00:00",
"pubType": "proceedings",
"pages": "296-303",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-9385-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "938500a289",
"articleId": "19RSsiivPOM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "938500a304",
"articleId": "19RSrPrXleg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibe/2017/1324/0/132401a525",
"title": "Public Health Policy for Management of Hearing Impairments Based on Big Data Analytics: EVOTION at Genesis",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2017/132401a525/12OmNBE7Mqs",
"parentPublication": {
"id": "proceedings/bibe/2017/1324/0",
"title": "2017 IEEE 17th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2006/1/1/01656849",
"title": "Two-Phase Resonant Clocking for Ultra-Low-Power Hearing Aid Applications",
"doi": null,
"abstractUrl": "/proceedings-article/date/2006/01656849/12OmNqGA56W",
"parentPublication": {
"id": "proceedings/date/2006/1/1",
"title": "2006 Design, Automation and Test in Europe",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881327",
"title": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881327/12OmNvSbBHM",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2011/4208/0/05763297",
"title": "A 0.964mW digital hearing aid system",
"doi": null,
"abstractUrl": "/proceedings-article/date/2011/05763297/12OmNzUPpAw",
"parentPublication": {
"id": "proceedings/date/2011/4208/0",
"title": "Design, Automation & Test in Europe Conference & Exhibition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577076",
"title": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577076/17D45VsBU4f",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cncit/2022/5296/0/529600a014",
"title": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cncit/2022/529600a014/1GNuyw8GC76",
"parentPublication": {
"id": "proceedings/cncit/2022/5296/0",
"title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2022/6495/0/649500a308",
"title": "Predicting and Explaining Hearing Aid Usage Using Encoder-Decoder with Attention Mechanism and SHAP",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2022/649500a308/1MeoG3BDZ7y",
"parentPublication": {
"id": "proceedings/sitis/2022/6495/0",
"title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2019/3851/0/385100a307",
"title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data",
"doi": null,
"abstractUrl": "/proceedings-article/services/2019/385100a307/1cTIczQh5vi",
"parentPublication": {
"id": "proceedings/services/2019/3851/2642-939X",
"title": "2019 IEEE World Congress on Services (SERVICES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998401",
"title": "Superhuman Hearing - Virtual Prototyping of Artificial Hearing: a Case Study on Interactions and Acoustic Beamforming",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998401/1hrXgAAK6NW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a241",
"title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a241/1tnYlALODdu",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1GNuxELfmAU",
"title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)",
"acronym": "cncit",
"groupId": "1847745",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1GNuyw8GC76",
"doi": "10.1109/CNCIT56797.2022.00011",
"title": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation",
"normalizedTitle": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation",
"abstract": "Wearing hearing aids is one of the effective measures to correct the hearing condition of patients with hearing loss. The choice of the prescription formula is the key to hearing aid gain compensation. DSL-v5 is a commonly used fitting prescription formula, which is designed mainly for pediatric patients. It introduces the Real-Ear-to-Coupler Difference (RECD) test, taking into account the patient's age and hearing threshold, adding an understanding of hearing test methods, hearing aid types, microphone position effects, and hearing assessment with plug-in headphones. To aggregate various factors, this paper proposes a DSL-v5 fitting formula through a least squares-based polynomial fitting method, and verifies the accuracy of this fitting formula by testing experiments. The experiments demonstrate that the fitting formula can well meet the gain compensation needs of adult patients and pediatric patients.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Wearing hearing aids is one of the effective measures to correct the hearing condition of patients with hearing loss. The choice of the prescription formula is the key to hearing aid gain compensation. DSL-v5 is a commonly used fitting prescription formula, which is designed mainly for pediatric patients. It introduces the Real-Ear-to-Coupler Difference (RECD) test, taking into account the patient's age and hearing threshold, adding an understanding of hearing test methods, hearing aid types, microphone position effects, and hearing assessment with plug-in headphones. To aggregate various factors, this paper proposes a DSL-v5 fitting formula through a least squares-based polynomial fitting method, and verifies the accuracy of this fitting formula by testing experiments. The experiments demonstrate that the fitting formula can well meet the gain compensation needs of adult patients and pediatric patients.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Wearing hearing aids is one of the effective measures to correct the hearing condition of patients with hearing loss. The choice of the prescription formula is the key to hearing aid gain compensation. DSL-v5 is a commonly used fitting prescription formula, which is designed mainly for pediatric patients. It introduces the Real-Ear-to-Coupler Difference (RECD) test, taking into account the patient's age and hearing threshold, adding an understanding of hearing test methods, hearing aid types, microphone position effects, and hearing assessment with plug-in headphones. To aggregate various factors, this paper proposes a DSL-v5 fitting formula through a least squares-based polynomial fitting method, and verifies the accuracy of this fitting formula by testing experiments. The experiments demonstrate that the fitting formula can well meet the gain compensation needs of adult patients and pediatric patients.",
"fno": "529600a014",
"keywords": [
"Ear",
"Headphones",
"Hearing",
"Hearing Aids",
"Least Squares Approximations",
"Paediatrics",
"Patient Diagnosis",
"Polynomial Approximation",
"Wear",
"Squares Based Polynomial Fitting Method",
"Testing Experiments",
"Gain Compensation Needs",
"Adult Patients",
"Pediatric Patients",
"Aid Fitting Formulas",
"Polynomial Approximation",
"Hearing Aids",
"Effective Measures",
"Hearing Loss",
"Aid Gain Compensation",
"Commonly Used Fitting Prescription Formula",
"Real Ear To Coupler Difference Test",
"Patient",
"Test Methods",
"Aid Types",
"Microphone Position Effects",
"Hearing Assessment",
"DSL V 5 Fitting Formula",
"Headphones",
"Pediatrics",
"Aggregates",
"Fitting",
"Auditory System",
"Hearing Aids",
"Loss Measurement",
"DSL V 5",
"Fitting Formula",
"Pediatric Hearing Aid Fitting",
"Polynomial Approximation"
],
"authors": [
{
"affiliation": "School of Information Science and Engineering, Southeast University,Nanjing,China",
"fullName": "Jie Wang",
"givenName": "Jie",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Information Science and Engineering, Southeast University,Nanjing,China",
"fullName": "Mengjie Ju",
"givenName": "Mengjie",
"surname": "Ju",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cncit",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "14-19",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5296-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "529600a006",
"articleId": "1GNuBklhHnW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "529600a020",
"articleId": "1GNuAAgSfsI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/1991/0003/0/00151068",
"title": "A digital filterbank hearing aid-design, implementation and evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00151068/12OmNvAiSpE",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881327",
"title": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881327/12OmNvSbBHM",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2011/4208/0/05763297",
"title": "A 0.964mW digital hearing aid system",
"doi": null,
"abstractUrl": "/proceedings-article/date/2011/05763297/12OmNzUPpAw",
"parentPublication": {
"id": "proceedings/date/2011/4208/0",
"title": "Design, Automation & Test in Europe Conference & Exhibition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446298",
"title": "3D Tune-In: 3D-Games for Tuning and Learning About Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446298/13bd1ftOBDg",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577076",
"title": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577076/17D45VsBU4f",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2018/9385/0/938500a296",
"title": "Big Data Analytics in Healthcare: Design and Implementation for a Hearing Aid Case Study",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2018/938500a296/19RSs2R4Q5a",
"parentPublication": {
"id": "proceedings/sitis/2018/9385/0",
"title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2022/6495/0/649500a308",
"title": "Predicting and Explaining Hearing Aid Usage Using Encoder-Decoder with Attention Mechanism and SHAP",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2022/649500a308/1MeoG3BDZ7y",
"parentPublication": {
"id": "proceedings/sitis/2022/6495/0",
"title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2019/3851/0/385100a307",
"title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data",
"doi": null,
"abstractUrl": "/proceedings-article/services/2019/385100a307/1cTIczQh5vi",
"parentPublication": {
"id": "proceedings/services/2019/3851/2642-939X",
"title": "2019 IEEE World Congress on Services (SERVICES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998401",
"title": "Superhuman Hearing - Virtual Prototyping of Artificial Hearing: a Case Study on Interactions and Acoustic Beamforming",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998401/1hrXgAAK6NW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a241",
"title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a241/1tnYlALODdu",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1MeoElmyyEo",
"title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"acronym": "sitis",
"groupId": "10089803",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1MeoG3BDZ7y",
"doi": "10.1109/SITIS57111.2022.00053",
"title": "Predicting and Explaining Hearing Aid Usage Using Encoder-Decoder with Attention Mechanism and SHAP",
"normalizedTitle": "Predicting and Explaining Hearing Aid Usage Using Encoder-Decoder with Attention Mechanism and SHAP",
"abstract": "Understanding the factors that contribute to optimal hearing aid fitting and hearing aid user experiences is crucial in order to increase the satisfaction and quality of life of hearing loss patients, as well as reduce societal and financial burdens. This work proposes a novel framework that uses Encoder-decoder with attention mechanism (attn-ED) for predicting future hearing aid usage and SHAP to explain the factors contributing to this prediction. It has been demonstrated in experiments that attn-ED performs well at predicting future hearing aid usage, and that SHAP can be utilized to calculate the contribution of different factors affecting hearing aid usage. This framework aims to establish confidence that AI models can be utilized in the medical domain with the use of XAI methods. Moreover, the proposed framework can also assist clinicians in determining the nature of interventions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Understanding the factors that contribute to optimal hearing aid fitting and hearing aid user experiences is crucial in order to increase the satisfaction and quality of life of hearing loss patients, as well as reduce societal and financial burdens. This work proposes a novel framework that uses Encoder-decoder with attention mechanism (attn-ED) for predicting future hearing aid usage and SHAP to explain the factors contributing to this prediction. It has been demonstrated in experiments that attn-ED performs well at predicting future hearing aid usage, and that SHAP can be utilized to calculate the contribution of different factors affecting hearing aid usage. This framework aims to establish confidence that AI models can be utilized in the medical domain with the use of XAI methods. Moreover, the proposed framework can also assist clinicians in determining the nature of interventions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Understanding the factors that contribute to optimal hearing aid fitting and hearing aid user experiences is crucial in order to increase the satisfaction and quality of life of hearing loss patients, as well as reduce societal and financial burdens. This work proposes a novel framework that uses Encoder-decoder with attention mechanism (attn-ED) for predicting future hearing aid usage and SHAP to explain the factors contributing to this prediction. It has been demonstrated in experiments that attn-ED performs well at predicting future hearing aid usage, and that SHAP can be utilized to calculate the contribution of different factors affecting hearing aid usage. This framework aims to establish confidence that AI models can be utilized in the medical domain with the use of XAI methods. Moreover, the proposed framework can also assist clinicians in determining the nature of interventions.",
"fno": "649500a308",
"keywords": [
"Hearing",
"Hearing Aids",
"Aid User Experiences",
"Attention Mechanism",
"Hearing Aid",
"Employee Welfare",
"Adaptation Models",
"Fitting",
"Auditory System",
"Predictive Models",
"Hearing Aids",
"Stakeholders",
"XAI",
"Hearing Loss",
"Encoder Decoder",
"Attention Mechanism",
"Hearing Aid Usage"
],
"authors": [
{
"affiliation": "University of London,Department of Computer Science City,London,UK",
"fullName": "Qiqi Su",
"givenName": "Qiqi",
"surname": "Su",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National and Kapodistrian University of Athens Medical School,Department of Otorhinolaryngology Head and Neck Surgery,Athens,Greece",
"fullName": "Eleftheria Iliadou",
"givenName": "Eleftheria",
"surname": "Iliadou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sitis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "308-315",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6495-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "649500a300",
"articleId": "1MeoG9Ki4y4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "649500a316",
"articleId": "1MeoGnEV2Q8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/itag/2016/3738/0/3738a077",
"title": "User Involvement in Design and Application of Virtual Reality Gamification to Facilitate the Use of Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/itag/2016/3738a077/12OmNCzsKFe",
"parentPublication": {
"id": "proceedings/itag/2016/3738/0",
"title": "2016 International Conference on Interactive Technologies and Games (iTAG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00151068",
"title": "A digital filterbank hearing aid-design, implementation and evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00151068/12OmNvAiSpE",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881327",
"title": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881327/12OmNvSbBHM",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2011/4208/0/05763297",
"title": "A 0.964mW digital hearing aid system",
"doi": null,
"abstractUrl": "/proceedings-article/date/2011/05763297/12OmNzUPpAw",
"parentPublication": {
"id": "proceedings/date/2011/4208/0",
"title": "Design, Automation & Test in Europe Conference & Exhibition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446298",
"title": "3D Tune-In: 3D-Games for Tuning and Learning About Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446298/13bd1ftOBDg",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577076",
"title": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577076/17D45VsBU4f",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2018/9385/0/938500a296",
"title": "Big Data Analytics in Healthcare: Design and Implementation for a Hearing Aid Case Study",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2018/938500a296/19RSs2R4Q5a",
"parentPublication": {
"id": "proceedings/sitis/2018/9385/0",
"title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cncit/2022/5296/0/529600a014",
"title": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cncit/2022/529600a014/1GNuyw8GC76",
"parentPublication": {
"id": "proceedings/cncit/2022/5296/0",
"title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2019/3851/0/385100a307",
"title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data",
"doi": null,
"abstractUrl": "/proceedings-article/services/2019/385100a307/1cTIczQh5vi",
"parentPublication": {
"id": "proceedings/services/2019/3851/2642-939X",
"title": "2019 IEEE World Congress on Services (SERVICES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a241",
"title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a241/1tnYlALODdu",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cTI8geedm8",
"title": "2019 IEEE World Congress on Services (SERVICES)",
"acronym": "services",
"groupId": "1800492",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cTIczQh5vi",
"doi": "10.1109/SERVICES.2019.00086",
"title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data",
"normalizedTitle": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data",
"abstract": "Modern hearing aids are not simple passive sound enhancers, but rather complex devices that can log (via smartphones) multivariate real-time data from the acoustic environment of a user. In the evotion project (http://h2020evotion.eu) such hearing aids are integrated with a Big Data analytics platform to bring about ecologically valid evidence to support the hearing healthcare sector. Here, we present the background of the Big Data analytics platform and demonstrate that modeling of longitudinally sampled data from hearing aids can support clinical investigations with hypotheses about hearing aid usage prognosis, and support public health decision-making within the hearing healthcare sector by simulation techniques. We found, that distinct characteristics of the acoustic environment significantly modulate how hearing impaired individuals use their hearing aids. Higher sound levels and an increased sound diversity but degraded signal quality all predicts more minutes of use per hour. By simulation, we show that a projected increase in the overall sound levels by 10dB followed by a 4dB increase in noise exposure will increase the need for hearing aid use by an additional 1 hour/day across a population of hearing impaired hearing aid users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Modern hearing aids are not simple passive sound enhancers, but rather complex devices that can log (via smartphones) multivariate real-time data from the acoustic environment of a user. In the evotion project (http://h2020evotion.eu) such hearing aids are integrated with a Big Data analytics platform to bring about ecologically valid evidence to support the hearing healthcare sector. Here, we present the background of the Big Data analytics platform and demonstrate that modeling of longitudinally sampled data from hearing aids can support clinical investigations with hypotheses about hearing aid usage prognosis, and support public health decision-making within the hearing healthcare sector by simulation techniques. We found, that distinct characteristics of the acoustic environment significantly modulate how hearing impaired individuals use their hearing aids. Higher sound levels and an increased sound diversity but degraded signal quality all predicts more minutes of use per hour. By simulation, we show that a projected increase in the overall sound levels by 10dB followed by a 4dB increase in noise exposure will increase the need for hearing aid use by an additional 1 hour/day across a population of hearing impaired hearing aid users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Modern hearing aids are not simple passive sound enhancers, but rather complex devices that can log (via smartphones) multivariate real-time data from the acoustic environment of a user. In the evotion project (http://h2020evotion.eu) such hearing aids are integrated with a Big Data analytics platform to bring about ecologically valid evidence to support the hearing healthcare sector. Here, we present the background of the Big Data analytics platform and demonstrate that modeling of longitudinally sampled data from hearing aids can support clinical investigations with hypotheses about hearing aid usage prognosis, and support public health decision-making within the hearing healthcare sector by simulation techniques. We found, that distinct characteristics of the acoustic environment significantly modulate how hearing impaired individuals use their hearing aids. Higher sound levels and an increased sound diversity but degraded signal quality all predicts more minutes of use per hour. By simulation, we show that a projected increase in the overall sound levels by 10dB followed by a 4dB increase in noise exposure will increase the need for hearing aid use by an additional 1 hour/day across a population of hearing impaired hearing aid users.",
"fno": "385100a307",
"keywords": [
"Big Data",
"Data Analysis",
"Health Care",
"Hearing Aids",
"Medical Computing",
"Noise Exposure",
"Sound Diversity",
"Public Health Decision Making",
"Hearing Aid Usage Prognosis",
"Multivariate Real Time Data",
"Real Time Hearing Aid Data",
"Big Data",
"Longitudinally Sampled Data",
"Hearing Healthcare Sector",
"Acoustic Environment",
"Noise Figure 10 0 D B",
"Noise Figure 4 0 D B",
"Task Analysis",
"Big Data",
"Hearing Aids",
"Data Models",
"Auditory System",
"Acoustics",
"Predictive Models",
"Hearing Aids",
"Big Data Analytics",
"Mixed Models",
"Multilevel Clustered Data",
"Evidence Based Public Health Policies"
],
"authors": [
{
"affiliation": "Eriksholm Research Centre",
"fullName": "Jeppe Christensen",
"givenName": "Jeppe",
"surname": "Christensen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Eriksholm Research Centre",
"fullName": "Niels Pontoppidan",
"givenName": "Niels",
"surname": "Pontoppidan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Università degli Studi di Milano",
"fullName": "Marco Anisetti",
"givenName": "Marco",
"surname": "Anisetti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Università degli Studi di Milano",
"fullName": "Valerio Bellandi",
"givenName": "Valerio",
"surname": "Bellandi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Università degli Studi di Milano",
"fullName": "Marco Cremonini",
"givenName": "Marco",
"surname": "Cremonini",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "services",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-07-01T00:00:00",
"pubType": "proceedings",
"pages": "307-313",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3851-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "385100z023",
"articleId": "1cTIcJbCnYY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "385100z024",
"articleId": "1cTIfAib9II",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2002/7402/4/05745533",
"title": "Signal processing, hearing aid design, and the psychoacoustic turing test",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745533/12OmNrAv3Gu",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/4",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881327",
"title": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881327/12OmNvSbBHM",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2011/4208/0/05763297",
"title": "A 0.964mW digital hearing aid system",
"doi": null,
"abstractUrl": "/proceedings-article/date/2011/05763297/12OmNzUPpAw",
"parentPublication": {
"id": "proceedings/date/2011/4208/0",
"title": "Design, Automation & Test in Europe Conference & Exhibition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cpsna/2014/5387/0/5387a055",
"title": "An Intelligent Hearing Aid System Based on Real-Time Signal Processing",
"doi": null,
"abstractUrl": "/proceedings-article/cpsna/2014/5387a055/12OmNzvhvEN",
"parentPublication": {
"id": "proceedings/cpsna/2014/5387/0",
"title": "2014 IEEE International Conference on Cyber-Physical Systems, Networks, and Applications (CPSNA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446298",
"title": "3D Tune-In: 3D-Games for Tuning and Learning About Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446298/13bd1ftOBDg",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577076",
"title": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577076/17D45VsBU4f",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2018/9385/0/938500a296",
"title": "Big Data Analytics in Healthcare: Design and Implementation for a Hearing Aid Case Study",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2018/938500a296/19RSs2R4Q5a",
"parentPublication": {
"id": "proceedings/sitis/2018/9385/0",
"title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cncit/2022/5296/0/529600a014",
"title": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cncit/2022/529600a014/1GNuyw8GC76",
"parentPublication": {
"id": "proceedings/cncit/2022/5296/0",
"title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2022/6495/0/649500a308",
"title": "Predicting and Explaining Hearing Aid Usage Using Encoder-Decoder with Attention Mechanism and SHAP",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2022/649500a308/1MeoG3BDZ7y",
"parentPublication": {
"id": "proceedings/sitis/2022/6495/0",
"title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a241",
"title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a241/1tnYlALODdu",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1ezRxla87i8",
"title": "2019 IEEE 17th International Conference on Software Engineering Research, Management and Applications (SERA)",
"acronym": "sera",
"groupId": "1001129",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1ezRz8Cf3MI",
"doi": "10.1109/SERA.2019.8886796",
"title": "Identification of Difficult English Words for Assisting Hearing Impaired Children in Learning Language",
"normalizedTitle": "Identification of Difficult English Words for Assisting Hearing Impaired Children in Learning Language",
"abstract": "Hearing impairment is a partial or total inability to hear. Children cannot learn speech and develop vocabulary at a regular pace due to which process of language development in such children is slow. For hearing impaired children, reading and writing English language text is considered to be a complex task. A Text is always composed of sentences and the sentences are made by the combination of complex and simpler words. Extensive use of complex words in a text (stories, essays, study material, or any text available on web stores) makes it difficult to read, write, understand and interpret. This causes more language inconsistency and learning problems for hearing impaired children as compared to the normal hearing peers. Proposed methodology in this paper focuses on the identification of difficult words from the English text for assisting hearing impaired children in learning the English language in a less complex way. Developed system uses C45 decision tree learning model to classify words as difficult or not difficult from the given text after the extraction of lexical and semantic features of words based on linguistic rules specific to hearing impaired children. Proposed methodology, is a full fledge learning tool that not only helps a child to improve and learn reading but also assists the therapist in conducting academic exercises and conclude that which words are difficult or which are not. Results indicate improved accuracy as compared to previous techniques used for the respective research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Hearing impairment is a partial or total inability to hear. Children cannot learn speech and develop vocabulary at a regular pace due to which process of language development in such children is slow. For hearing impaired children, reading and writing English language text is considered to be a complex task. A Text is always composed of sentences and the sentences are made by the combination of complex and simpler words. Extensive use of complex words in a text (stories, essays, study material, or any text available on web stores) makes it difficult to read, write, understand and interpret. This causes more language inconsistency and learning problems for hearing impaired children as compared to the normal hearing peers. Proposed methodology in this paper focuses on the identification of difficult words from the English text for assisting hearing impaired children in learning the English language in a less complex way. Developed system uses C45 decision tree learning model to classify words as difficult or not difficult from the given text after the extraction of lexical and semantic features of words based on linguistic rules specific to hearing impaired children. Proposed methodology, is a full fledge learning tool that not only helps a child to improve and learn reading but also assists the therapist in conducting academic exercises and conclude that which words are difficult or which are not. Results indicate improved accuracy as compared to previous techniques used for the respective research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Hearing impairment is a partial or total inability to hear. Children cannot learn speech and develop vocabulary at a regular pace due to which process of language development in such children is slow. For hearing impaired children, reading and writing English language text is considered to be a complex task. A Text is always composed of sentences and the sentences are made by the combination of complex and simpler words. Extensive use of complex words in a text (stories, essays, study material, or any text available on web stores) makes it difficult to read, write, understand and interpret. This causes more language inconsistency and learning problems for hearing impaired children as compared to the normal hearing peers. Proposed methodology in this paper focuses on the identification of difficult words from the English text for assisting hearing impaired children in learning the English language in a less complex way. Developed system uses C45 decision tree learning model to classify words as difficult or not difficult from the given text after the extraction of lexical and semantic features of words based on linguistic rules specific to hearing impaired children. Proposed methodology, is a full fledge learning tool that not only helps a child to improve and learn reading but also assists the therapist in conducting academic exercises and conclude that which words are difficult or which are not. Results indicate improved accuracy as compared to previous techniques used for the respective research.",
"fno": "08886796",
"keywords": [
"Computer Aided Instruction",
"Decision Trees",
"Handicapped Aids",
"Hearing",
"Linguistics",
"Natural Language Processing",
"Paediatrics",
"Text Analysis",
"Vocabulary",
"Hearing Impaired Children",
"English Words",
"English Language Text",
"Language Inconsistency",
"Learning Problems",
"Vocabulary",
"Language Development",
"Decision Tree Learning Model",
"Academic Exercises",
"Learning Tool",
"Semantic Features",
"Linguistic Rules",
"Auditory System",
"Vocabulary",
"Task Analysis",
"Hearing Aids",
"Writing",
"Text Recognition",
"Decision Trees",
"Hearing Impairment",
"Classification",
"Decision Tree",
"Difficult Words Identification",
"Data Mining"
],
"authors": [
{
"affiliation": "Department of Computer Engineering, College of E&ME, ISLAMABAD, Pakistan",
"fullName": "Munazza Ansar",
"givenName": "Munazza",
"surname": "Ansar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Engineering, College of E&ME, ISLAMABAD, Pakistan",
"fullName": "Usman Qamar",
"givenName": "Usman",
"surname": "Qamar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Engineering, College of E&ME, ISLAMABAD, Pakistan",
"fullName": "Raheela Bibi",
"givenName": "Raheela",
"surname": "Bibi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Engineering, College of E&ME, ISLAMABAD, Pakistan",
"fullName": "Asma Shaheen",
"givenName": "Asma",
"surname": "Shaheen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sera",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-05-01T00:00:00",
"pubType": "proceedings",
"pages": "60-65",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-0798-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08886801",
"articleId": "1ezRyWQJXuU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08886788",
"articleId": "1ezRySx4dQ4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2013/5009/0/5009a212",
"title": "Development of Speech Training Aid System for Hearing-Impaired Children",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2013/5009a212/12OmNB9t6ma",
"parentPublication": {
"id": "proceedings/icalt/2013/5009/0",
"title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2012/4855/0/4855a306",
"title": "A Study of Face Motion Capture and Its Data Processing Technique Applied to the Speech Training of Hearing-impaired Children",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2012/4855a306/12OmNwe2IuM",
"parentPublication": {
"id": "proceedings/icinis/2012/4855/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdcloud/2015/7183/0/7183a203",
"title": "Design of Measurement System for Respiratory Training Device Based on Speech Training for Hearing-Impaired Children",
"doi": null,
"abstractUrl": "/proceedings-article/bdcloud/2015/7183a203/12OmNx3HI9p",
"parentPublication": {
"id": "proceedings/bdcloud/2015/7183/0",
"title": "2015 IEEE Fifth International Conference on Big Data and Cloud Computing (BDCloud)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/caapwd/1992/2730/0/00217403",
"title": "Video-game for speech perception testing and training of young hearing-impaired children",
"doi": null,
"abstractUrl": "/proceedings-article/caapwd/1992/00217403/12OmNxdm4z8",
"parentPublication": {
"id": "proceedings/caapwd/1992/2730/0",
"title": "Proceedings of the Johns Hopkins National Search for Computing Applications to Assist Persons with Disabilities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2011/4477/0/4477a547",
"title": "The 3D Face Feature Extraction and Driving Method on Pronunciation Rehabilitation for Impaired Hearing",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2011/4477a547/12OmNyFU6WN",
"parentPublication": {
"id": "proceedings/cse/2011/4477/0",
"title": "2011 14th IEEE International Conference on Computational Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2013/5159/0/06726386",
"title": "Breath Training for Hearing Impaired Hearing Chinldre Based on Computational Fluid Dynamics",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2013/06726386/12OmNzWOBfN",
"parentPublication": {
"id": "proceedings/msn/2013/5159/0",
"title": "2013 Ninth International Conference on Mobile Ad-hoc and Sensor Networks (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446298",
"title": "3D Tune-In: 3D-Games for Tuning and Learning About Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446298/13bd1ftOBDg",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2019/3851/0/385100a307",
"title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data",
"doi": null,
"abstractUrl": "/proceedings-article/services/2019/385100a307/1cTIczQh5vi",
"parentPublication": {
"id": "proceedings/services/2019/3851/2642-939X",
"title": "2019 IEEE World Congress on Services (SERVICES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a241",
"title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a241/1tnYlALODdu",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a209",
"title": "Speech Rehabilitation System for Hearing Impaired Children Based on Virtual Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a209/1vg7WJJ3xTi",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tnY7jIfsVq",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"acronym": "icise",
"groupId": "1841104",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnYlALODdu",
"doi": "10.1109/ICISE51755.2020.00059",
"title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"normalizedTitle": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"abstract": "In order to realize a mode of hearing aid parameters adjustment which is efficient, electronic and convenient for hearing impaired children, an auto adaptive solution of hearing aid parameters based on deep neural network sound environment classification on Android terminal is proposed. In this scheme, a deep neural network based sound environment classification method is adopted on Android terminal, and multi-frames accumulation and multi-blocks statistics are used to improve the classification accuracy, and then the corresponding hearing aid parameters are matched for different sound environment categories and sent to the hearing aid to realize the auto adaptive of hearing aid to dynamic sound environment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In order to realize a mode of hearing aid parameters adjustment which is efficient, electronic and convenient for hearing impaired children, an auto adaptive solution of hearing aid parameters based on deep neural network sound environment classification on Android terminal is proposed. In this scheme, a deep neural network based sound environment classification method is adopted on Android terminal, and multi-frames accumulation and multi-blocks statistics are used to improve the classification accuracy, and then the corresponding hearing aid parameters are matched for different sound environment categories and sent to the hearing aid to realize the auto adaptive of hearing aid to dynamic sound environment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In order to realize a mode of hearing aid parameters adjustment which is efficient, electronic and convenient for hearing impaired children, an auto adaptive solution of hearing aid parameters based on deep neural network sound environment classification on Android terminal is proposed. In this scheme, a deep neural network based sound environment classification method is adopted on Android terminal, and multi-frames accumulation and multi-blocks statistics are used to improve the classification accuracy, and then the corresponding hearing aid parameters are matched for different sound environment categories and sent to the hearing aid to realize the auto adaptive of hearing aid to dynamic sound environment.",
"fno": "226100a241",
"keywords": [
"Handicapped Aids",
"Hearing",
"Hearing Aids",
"Neural Nets",
"Application Design",
"Hearing Aid Parameters Auto Adaptive System",
"Impaired Children",
"Android Terminal",
"Hearing Aid Parameters Adjustment",
"Auto Adaptive Solution",
"Deep Neural Network Sound Environment Classification",
"Sound Environment Classification Method",
"Classification Accuracy",
"Corresponding Hearing Aid Parameters",
"Different Sound Environment Categories",
"Dynamic Sound Environment",
"Information Science",
"Adaptive Systems",
"Operating Systems",
"Neural Networks",
"Education",
"Auditory System",
"Hearing Aids",
"Android Terminal",
"Deep Neural Network",
"Hearing Aid"
],
"authors": [
{
"affiliation": "Zhejiang University No. 38, Zheda Road,School of Computer Science and Technology,Xihu District,Hangzhou,China,310007",
"fullName": "Xiaoqian Fan",
"givenName": "Xiaoqian",
"surname": "Fan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University No. 38, Zheda Road,School of Computer Science and Technology,Xihu District,Hangzhou,China,310007",
"fullName": "Wenzhi Chen",
"givenName": "Wenzhi",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hangzhou Youting Technology Co., Ltd No. 220, East Zone, Building A,No. 525, Xixi Road,Hangzhou,China,310007",
"fullName": "Quanfang Fan",
"givenName": "Quanfang",
"surname": "Fan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University No. 38, Zheda Road,School of Computer Science and Technology,Xihu District,Hangzhou,China,310007",
"fullName": "Tianyi Sun",
"givenName": "Tianyi",
"surname": "Sun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icise",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "241-245",
"year": "2020",
"issn": null,
"isbn": "978-1-6654-2261-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "226100a237",
"articleId": "1tnYgX73TBS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "226100a246",
"articleId": "1tnYd7em69G",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2013/5009/0/5009a212",
"title": "Development of Speech Training Aid System for Hearing-Impaired Children",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2013/5009a212/12OmNB9t6ma",
"parentPublication": {
"id": "proceedings/icalt/2013/5009/0",
"title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881327",
"title": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881327/12OmNvSbBHM",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2011/4208/0/05763297",
"title": "A 0.964mW digital hearing aid system",
"doi": null,
"abstractUrl": "/proceedings-article/date/2011/05763297/12OmNzUPpAw",
"parentPublication": {
"id": "proceedings/date/2011/4208/0",
"title": "Design, Automation & Test in Europe Conference & Exhibition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446298",
"title": "3D Tune-In: 3D-Games for Tuning and Learning About Hearing Aids",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446298/13bd1ftOBDg",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577076",
"title": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577076/17D45VsBU4f",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2018/9385/0/938500a296",
"title": "Big Data Analytics in Healthcare: Design and Implementation for a Hearing Aid Case Study",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2018/938500a296/19RSs2R4Q5a",
"parentPublication": {
"id": "proceedings/sitis/2018/9385/0",
"title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cncit/2022/5296/0/529600a014",
"title": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cncit/2022/529600a014/1GNuyw8GC76",
"parentPublication": {
"id": "proceedings/cncit/2022/5296/0",
"title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2022/6495/0/649500a308",
"title": "Predicting and Explaining Hearing Aid Usage Using Encoder-Decoder with Attention Mechanism and SHAP",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2022/649500a308/1MeoG3BDZ7y",
"parentPublication": {
"id": "proceedings/sitis/2022/6495/0",
"title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2019/3851/0/385100a307",
"title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data",
"doi": null,
"abstractUrl": "/proceedings-article/services/2019/385100a307/1cTIczQh5vi",
"parentPublication": {
"id": "proceedings/services/2019/3851/2642-939X",
"title": "2019 IEEE World Congress on Services (SERVICES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sera/2019/0798/0/08886796",
"title": "Identification of Difficult English Words for Assisting Hearing Impaired Children in Learning Language",
"doi": null,
"abstractUrl": "/proceedings-article/sera/2019/08886796/1ezRz8Cf3MI",
"parentPublication": {
"id": "proceedings/sera/2019/0798/0",
"title": "2019 IEEE 17th International Conference on Software Engineering Research, Management and Applications (SERA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwDACwE",
"doi": "10.1109/VR.2017.7892290",
"title": "Asymetric telecollaboration in virtual reality",
"normalizedTitle": "Asymetric telecollaboration in virtual reality",
"abstract": "We present a first study where we combine two asymétrie virtual reality systems for telecollaboration purposes: a CAVE system and a head-mounted display (HMD), using a server-client type architecture. Experiments on a puzzle game in limited time, alone and in collaboration, show that combining asymetric systems reduces cognitive load. Moreover, the participants reported preferring working in collaboration and showed to be more efficient in collaboration. These results provide insights in combining several low cost HMDs with a unique expensive CAVE.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a first study where we combine two asymétrie virtual reality systems for telecollaboration purposes: a CAVE system and a head-mounted display (HMD), using a server-client type architecture. Experiments on a puzzle game in limited time, alone and in collaboration, show that combining asymetric systems reduces cognitive load. Moreover, the participants reported preferring working in collaboration and showed to be more efficient in collaboration. These results provide insights in combining several low cost HMDs with a unique expensive CAVE.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a first study where we combine two asymétrie virtual reality systems for telecollaboration purposes: a CAVE system and a head-mounted display (HMD), using a server-client type architecture. Experiments on a puzzle game in limited time, alone and in collaboration, show that combining asymetric systems reduces cognitive load. Moreover, the participants reported preferring working in collaboration and showed to be more efficient in collaboration. These results provide insights in combining several low cost HMDs with a unique expensive CAVE.",
"fno": "07892290",
"keywords": [
"Collaboration",
"Resists",
"Atmospheric Measurements",
"Particle Measurements",
"Virtual Reality",
"Games",
"Indexes",
"Telecollaboration",
"CAVE",
"HMD"
],
"authors": [
{
"affiliation": "Ecole Polytechnique Fédéral de Lausanne, Immersive Interaction Group, Switzerland",
"fullName": "Thibault Porssut",
"givenName": "Thibault",
"surname": "Porssut",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Le2i, Arts et Métiers, CNRS, Univ., Bourgogne Franche-Comté, HeSam, Institut Image, France",
"fullName": "Jean-Rémy Chardonnet",
"givenName": "Jean-Rémy",
"surname": "Chardonnet",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "289-290",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892289",
"articleId": "12OmNzSQdoG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892291",
"articleId": "12OmNzGlRCn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892317",
"title": "A preliminary study of users' experiences of meditation in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892317/12OmNApcufx",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892230",
"title": "Cinematic virtual reality: Evaluating the effect of display type on the viewing experience for panoramic video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892230/12OmNx5GTZ2",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446269",
"title": "A Study of Cybersickness and Sensory Conflict Theory Using a Motion-Coupled Virtual Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446269/13bd1eTtWYf",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a215",
"title": "In-Place Collaboration in Extended Reality Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a215/1KaFNF7EG6k",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797858",
"title": "Immersive EEG: Evaluating Electroencephalography in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797858/1cJ0JWkSE3m",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vhcie/2017/2758/0/07935624",
"title": "Evaluating collision avoidance effects on discomfort in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vhcie/2017/07935624/1h0Lhmayehq",
"parentPublication": {
"id": "proceedings/vhcie/2017/2758/0",
"title": "2017 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998353",
"title": "Augmented Virtual Teleportation for High-Fidelity Telecollaboration",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998353/1hpPDKs9c7C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a005",
"title": "An Image-Based Method for Measuring Strabismus in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a005/1pBMkJ1GBSU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a629",
"title": "Myopia in Head-Worn Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a629/1tnXbsPeHbG",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09462341",
"title": "Evaluation of a Low-Cost Virtual Reality Surround-Screen Projection System",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09462341/1uDSAs8QPV6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKisy",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45VTRov4",
"doi": "10.1109/ISMAR.2018.00052",
"title": "Effects of Sharing Real-Time Multi-Sensory Heart Rate Feedback in Different Immersive Collaborative Virtual Environments",
"normalizedTitle": "Effects of Sharing Real-Time Multi-Sensory Heart Rate Feedback in Different Immersive Collaborative Virtual Environments",
"abstract": "Collaboration is an important application area for virtual reality (VR). However, unlike in the real world, collaboration in VR misses important empathetic cues that can make collaborators aware of each other's emotional states. Providing physiological feedback, such as heart rate or respiration rate, to users in VR has been shown to create a positive impact in single user environments. In this paper, through a rigorous mixed-factorial user experiment, we evaluated how providing heart rate feedback to collaborators influences their collaboration in three different environments requiring different kinds of collaboration. We have found that when provided with real-time heart rate feedback participants felt the presence of the collaborator more and felt that they understood their collaborator's emotional state more. Heart rate feedback also made participants feel more dominant when performing the task. We discuss the implication of this research for collaborative VR environments, provide design guidelines, and directions for future research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Collaboration is an important application area for virtual reality (VR). However, unlike in the real world, collaboration in VR misses important empathetic cues that can make collaborators aware of each other's emotional states. Providing physiological feedback, such as heart rate or respiration rate, to users in VR has been shown to create a positive impact in single user environments. In this paper, through a rigorous mixed-factorial user experiment, we evaluated how providing heart rate feedback to collaborators influences their collaboration in three different environments requiring different kinds of collaboration. We have found that when provided with real-time heart rate feedback participants felt the presence of the collaborator more and felt that they understood their collaborator's emotional state more. Heart rate feedback also made participants feel more dominant when performing the task. We discuss the implication of this research for collaborative VR environments, provide design guidelines, and directions for future research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Collaboration is an important application area for virtual reality (VR). However, unlike in the real world, collaboration in VR misses important empathetic cues that can make collaborators aware of each other's emotional states. Providing physiological feedback, such as heart rate or respiration rate, to users in VR has been shown to create a positive impact in single user environments. In this paper, through a rigorous mixed-factorial user experiment, we evaluated how providing heart rate feedback to collaborators influences their collaboration in three different environments requiring different kinds of collaboration. We have found that when provided with real-time heart rate feedback participants felt the presence of the collaborator more and felt that they understood their collaborator's emotional state more. Heart rate feedback also made participants feel more dominant when performing the task. We discuss the implication of this research for collaborative VR environments, provide design guidelines, and directions for future research.",
"fno": "745900a165",
"keywords": [
"Feedback",
"Groupware",
"Virtual Reality",
"Immersive Collaborative Virtual Environments",
"Real Time Multisensory Heart Rate Feedback",
"Collaborative VR Environments",
"Real Time Heart Rate Feedback Participants",
"Providing Heart Rate Feedback",
"Single User Environments",
"Providing Physiological Feedback",
"Heart Rate",
"Collaboration",
"Real Time Systems",
"Physiology",
"Avatars",
"Task Analysis",
"Virtual Environments"
],
"authors": [
{
"affiliation": null,
"fullName": "Arindam Dey",
"givenName": "Arindam",
"surname": "Dey",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hao Chen",
"givenName": "Hao",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chang Zhuang",
"givenName": "Chang",
"surname": "Zhuang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Robert W. Lindeman",
"givenName": "Robert W.",
"surname": "Lindeman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "165-173",
"year": "2018",
"issn": "1554-7868",
"isbn": "978-1-5386-7459-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "745900a153",
"articleId": "17D45VsBU1V",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "745900a175",
"articleId": "17D45Xh13tA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciibms/2015/8562/0/07439460",
"title": "Dynamic heart rate monitors algorithm for reflection green light wearable device",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2015/07439460/12OmNBd9T0e",
"parentPublication": {
"id": "proceedings/iciibms/2015/8562/0",
"title": "2015 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings/2014/5967/0/5967a352",
"title": "Scarecrow: Avatar Representation Using Biological Information Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ithings/2014/5967a352/12OmNCcKQP0",
"parentPublication": {
"id": "proceedings/ithings/2014/5967/0",
"title": "2014 IEEE International Conference on Internet of Things(iThings), and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2015/02/06821267",
"title": "Neuroticism, Extraversion, Conscientiousness and Stress: Physiological Correlates",
"doi": null,
"abstractUrl": "/journal/ta/2015/02/06821267/13rRUwjoNvd",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a209",
"title": "Effects of Heart Rate Feedback on an Asymmetric Platform using Augmented Reality and Laptop",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a209/1CJcCnEQ2ek",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a848",
"title": "Comparing Physiological and Emotional Effects of Happy and Sad Virtual Environments Experienced in Video and Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a848/1CJdhFm4Ez6",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a751",
"title": "Effects of Augmenting Real-Time Biofeedback in An Immersive VR Performance",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a751/1J7WwJHZI4M",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csii-bcd/2017/3302/0/3302a047",
"title": "Heart Rate and Heart Rate Variability Measuring System by Using Smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csii-bcd/2017/3302a047/1cdOBdiztwA",
"parentPublication": {
"id": "proceedings/acit-csii-bcd/2017/3302/0",
"title": "2017 5th Intl Conf on Applied Computing and Information Technology/4th Intl Conf on Computational Science/Intelligence and Applied Informatics/2nd Intl Conf on Big Data, Cloud Computing, Data Science (ACIT-CSII-BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2023/01/09269384",
"title": "Auditory Feedback of False Heart Rate for Video Game Experience Improvement",
"doi": null,
"abstractUrl": "/journal/ta/2023/01/09269384/1p1c3qb3XoI",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a474",
"title": "A Neurophysiological Approach for Measuring Presence in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a474/1pysuR65ESQ",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cseps/2021/2618/0/261800a319",
"title": "Detection based on non-contact heart rate measurement",
"doi": null,
"abstractUrl": "/proceedings-article/cseps/2021/261800a319/1wiQKwsaojm",
"parentPublication": {
"id": "proceedings/cseps/2021/2618/0",
"title": "2021 International Conference on Control Science and Electric Power Systems (CSEPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJcYgs1MY0",
"doi": "10.1109/VRW55335.2022.00329",
"title": "[DC]Using Multimodal Input in Augmented Virtual Teleportation",
"normalizedTitle": "[DC]Using Multimodal Input in Augmented Virtual Teleportation",
"abstract": "Augmented (AR) and Virtual Reality (VR) can create compelling emotional collaborative experiences, but very few studies have explored the importance of sharing a user's live environment and their physiological cues. In this PhD thesis, I am investigating how to use scene reconstruction and emotion recognition to enhance shared collaborative AR/VR experiences. I have developed a framework that can be broadly classified into two sections: 1) Live scene capturing for real-time environment reconstruction, 2) Sharing multimodal input such as gaze, gesture, and physiological cues. The main novelty of the research is that it is one of the first systems for real-time sharing of environment and emotion cues. It provides significant insight into how to create, measure, and share remote collaborative experiences. The research will be helpful in multiple application domains such as remote assistance, tourism, training and entertainment. It will also enable the creation of interfaces that automatically adapt to the user's emotional needs and environment and provide a better collaborative experience.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented (AR) and Virtual Reality (VR) can create compelling emotional collaborative experiences, but very few studies have explored the importance of sharing a user's live environment and their physiological cues. In this PhD thesis, I am investigating how to use scene reconstruction and emotion recognition to enhance shared collaborative AR/VR experiences. I have developed a framework that can be broadly classified into two sections: 1) Live scene capturing for real-time environment reconstruction, 2) Sharing multimodal input such as gaze, gesture, and physiological cues. The main novelty of the research is that it is one of the first systems for real-time sharing of environment and emotion cues. It provides significant insight into how to create, measure, and share remote collaborative experiences. The research will be helpful in multiple application domains such as remote assistance, tourism, training and entertainment. It will also enable the creation of interfaces that automatically adapt to the user's emotional needs and environment and provide a better collaborative experience.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented (AR) and Virtual Reality (VR) can create compelling emotional collaborative experiences, but very few studies have explored the importance of sharing a user's live environment and their physiological cues. In this PhD thesis, I am investigating how to use scene reconstruction and emotion recognition to enhance shared collaborative AR/VR experiences. I have developed a framework that can be broadly classified into two sections: 1) Live scene capturing for real-time environment reconstruction, 2) Sharing multimodal input such as gaze, gesture, and physiological cues. The main novelty of the research is that it is one of the first systems for real-time sharing of environment and emotion cues. It provides significant insight into how to create, measure, and share remote collaborative experiences. The research will be helpful in multiple application domains such as remote assistance, tourism, training and entertainment. It will also enable the creation of interfaces that automatically adapt to the user's emotional needs and environment and provide a better collaborative experience.",
"fno": "840200a956",
"keywords": [
"Emotion Recognition",
"Groupware",
"Image Reconstruction",
"Teleportation",
"Virtual Reality",
"Physiological Cues",
"Ph D Thesis",
"Scene Reconstruction",
"Emotion Recognition",
"Real Time Environment Reconstruction",
"Multimodal Input",
"Real Time Sharing",
"Emotion Cues",
"Remote Collaborative Experiences",
"Collaborative Experience",
"Augmented Virtual Teleportation",
"Virtual Reality",
"Compelling Emotional Collaborative Experiences",
"Live Scene Capturing",
"Training",
"Emotion Recognition",
"Three Dimensional Displays",
"Conferences",
"Collaboration",
"Entertainment Industry",
"Virtual Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality",
"Human Centered Computing Interaction Design Interaction Design Process And Methods"
],
"authors": [
{
"affiliation": "The University of Auckland,Empathic Computing Lab",
"fullName": "Prasanth Sasikumar",
"givenName": "Prasanth",
"surname": "Sasikumar",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "956-957",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a954",
"articleId": "1CJfdg0rEyY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a958",
"articleId": "1CJeVLWmIgw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a268",
"title": "Workshop on augmented reality for good",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a268/12OmNB8Cjak",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/intetain/2015/0061/0/07325506",
"title": "GAINE — tanGible augmented interaction for edutainment",
"doi": null,
"abstractUrl": "/proceedings-article/intetain/2015/07325506/12OmNC4O4CP",
"parentPublication": {
"id": "proceedings/intetain/2015/0061/0",
"title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671795",
"title": "Study of augmented gesture communication cues and view sharing in remote collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671795/12OmNwl8GBu",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2015/8471/0/8471a001",
"title": "Collaboration in Mediated and Augmented Reality (CiMAR) Summary",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2015/8471a001/12OmNybfqVO",
"parentPublication": {
"id": "proceedings/ismarw/2015/8471/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a153",
"title": "A User Study on MR Remote Collaboration Using Live 360 Video",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a153/17D45VsBU1V",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699227",
"title": "Do You Know What I Mean? An MR-Based Collaborative Platform",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699227/19F1PhUp98k",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a960",
"title": "[DC] Exploration of Context and Physiological Cues for Personalized Emotion-Adaptive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a960/1CJexFbyxUI",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a393",
"title": "Wearable RemoteFusion: A Mixed Reality Remote Collaboration System with Local Eye Gaze and Remote Hand Gesture Sharing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a393/1gysjIlsYus",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998353",
"title": "Augmented Virtual Teleportation for High-Fidelity Telecollaboration",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998353/1hpPDKs9c7C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a453",
"title": "Using Context and Physiological Cues to Improve Emotion Recognition in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a453/1yfxJ6xhCww",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrRgFp6G2s",
"doi": "10.1109/ISMAR55827.2022.00097",
"title": "VRDoc: Gaze-based Interactions for VR Reading Experience",
"normalizedTitle": "VRDoc: Gaze-based Interactions for VR Reading Experience",
"abstract": "Virtual reality (VR) offers the promise of an infinite office and remote collaboration, however, existing interactions in VR do not strongly support one of the most essential tasks for most knowledge workers, reading. This paper presents VRDoc, a set of gaze-based interaction methods designed to improve the reading experience in VR. We introduce three key components: Gaze Select-and-Snap for document selection, Gaze MagGlass for enhanced text legibility, and Gaze Scroll for ease of document traversal. We implemented each of these tools using a commodity VR headset with eye-tracking. In a series of user studies with 13 participants, we show that VRDoc makes VR reading both more efficient (p < 0.01) and less demanding (p < 0.01), and when given a choice, users preferred to use our tools over the current VR reading methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality (VR) offers the promise of an infinite office and remote collaboration, however, existing interactions in VR do not strongly support one of the most essential tasks for most knowledge workers, reading. This paper presents VRDoc, a set of gaze-based interaction methods designed to improve the reading experience in VR. We introduce three key components: Gaze Select-and-Snap for document selection, Gaze MagGlass for enhanced text legibility, and Gaze Scroll for ease of document traversal. We implemented each of these tools using a commodity VR headset with eye-tracking. In a series of user studies with 13 participants, we show that VRDoc makes VR reading both more efficient (p < 0.01) and less demanding (p < 0.01), and when given a choice, users preferred to use our tools over the current VR reading methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality (VR) offers the promise of an infinite office and remote collaboration, however, existing interactions in VR do not strongly support one of the most essential tasks for most knowledge workers, reading. This paper presents VRDoc, a set of gaze-based interaction methods designed to improve the reading experience in VR. We introduce three key components: Gaze Select-and-Snap for document selection, Gaze MagGlass for enhanced text legibility, and Gaze Scroll for ease of document traversal. We implemented each of these tools using a commodity VR headset with eye-tracking. In a series of user studies with 13 participants, we show that VRDoc makes VR reading both more efficient (p < 0.01) and less demanding (p < 0.01), and when given a choice, users preferred to use our tools over the current VR reading methods.",
"fno": "532500a787",
"keywords": [
"Gaze Tracking",
"Text Analysis",
"User Interfaces",
"Virtual Reality",
"Commodity VR Headset",
"Gaze Mag Glass",
"Gaze Based Interactions",
"Virtual Reality",
"VR Reading Experience",
"VR Reading Methods",
"VR Doc",
"Headphones",
"Collaboration",
"Gaze Tracking",
"Task Analysis",
"Augmented Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Interaction Techniques"
],
"authors": [
{
"affiliation": "University of Maryland",
"fullName": "Geonsun Lee",
"givenName": "Geonsun",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Jennifer Healey",
"givenName": "Jennifer",
"surname": "Healey",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Maryland",
"fullName": "Dinesh Manocha",
"givenName": "Dinesh",
"surname": "Manocha",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "787-796",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1JrRgzirMwo",
"name": "pismar202253250-09995681s1-mm_532500a787.zip",
"size": "46.3 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202253250-09995681s1-mm_532500a787.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "532500a777",
"articleId": "1JrR1CsIUjC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a797",
"articleId": "1JrR9qkyvsc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icat/2007/3056/0/30560280",
"title": "Interaction Without Gesture or Speech -- A Gaze Controlled AR System",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2007/30560280/12OmNCcKQtv",
"parentPublication": {
"id": "proceedings/icat/2007/3056/0",
"title": "17th International Conference on Artificial Reality and Telexistence (ICAT 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wkdd/2009/3543/0/3543a594",
"title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn",
"parentPublication": {
"id": "proceedings/wkdd/2009/3543/0",
"title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. WKDD 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446215",
"title": "Gaze Guidance in Immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446215/13bd1gJ1v0y",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699248",
"title": "DualGaze: Addressing the Midas Touch Problem in Gaze Mediated VR Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699248/19F1R5RaLFS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a832",
"title": "GazeDock: Gaze-Only Menu Selection in Virtual Reality using Auto-Triggering Peripheral Menu",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a832/1CJbR6qnKdW",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a562",
"title": "Gaze Capture based Considerate Behaviour Control of Virtual Guiding Agent",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a562/1CJfoWhFCXm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/emip/2022/9289/0/928900a008",
"title": "Entropy of Eye Movements While Reading Code or Text",
"doi": null,
"abstractUrl": "/proceedings-article/emip/2022/928900a008/1ED1UZAQKME",
"parentPublication": {
"id": "proceedings/emip/2022/9289/0",
"title": "2022 IEEE/ACM 10th International Workshop on Eye Movements in Programming (EMIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2022/9755/0/975500a170",
"title": "Development and evaluation of car training system using VR and eye tracking technology",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2022/975500a170/1GU75yVJubS",
"parentPublication": {
"id": "proceedings/iiai-aai/2022/9755/0",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089578",
"title": "Exploring Eye Gaze Visualization Techniques for Identifying Distracted Students in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089578/1jIxfimnIaY",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090461",
"title": "Front Camera Eye Tracking For Mobile VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pyswxBB73y",
"doi": "10.1109/ISMAR50242.2020.00033",
"title": "Optical Gaze Tracking with Spatially-Sparse Single-Pixel Detectors",
"normalizedTitle": "Optical Gaze Tracking with Spatially-Sparse Single-Pixel Detectors",
"abstract": "Gaze tracking is an essential component of next generation displays for virtual reality and augmented reality applications. Traditional camera-based gaze trackers used in next generation displays are known to be lacking in one or multiple of the following metrics: power consumption, cost, computational complexity, estimation accuracy, latency, and form-factor. We propose the use of discrete photodiodes and light-emitting diodes (LEDs) as an alternative to traditional camera-based gaze tracking approaches while taking all of these metrics into consideration. We begin by developing a rendering-based simulation framework for understanding the relationship between light sources and a virtual model eyeball. Findings from this framework are used for the placement of LEDs and photodiodes. Our first prototype uses a neural network to obtain an average error rate of 2.67° at 400 Hz while demanding only 16 mW. By simplifying the implementation to using only LEDs, duplexed as light transceivers, and more minimal machine learning model, namely a light-weight supervised Gaussian process regression algorithm, we show that our second prototype is capable of an average error rate of 1.57° at 250 Hz using 800 mW.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Gaze tracking is an essential component of next generation displays for virtual reality and augmented reality applications. Traditional camera-based gaze trackers used in next generation displays are known to be lacking in one or multiple of the following metrics: power consumption, cost, computational complexity, estimation accuracy, latency, and form-factor. We propose the use of discrete photodiodes and light-emitting diodes (LEDs) as an alternative to traditional camera-based gaze tracking approaches while taking all of these metrics into consideration. We begin by developing a rendering-based simulation framework for understanding the relationship between light sources and a virtual model eyeball. Findings from this framework are used for the placement of LEDs and photodiodes. Our first prototype uses a neural network to obtain an average error rate of 2.67° at 400 Hz while demanding only 16 mW. By simplifying the implementation to using only LEDs, duplexed as light transceivers, and more minimal machine learning model, namely a light-weight supervised Gaussian process regression algorithm, we show that our second prototype is capable of an average error rate of 1.57° at 250 Hz using 800 mW.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Gaze tracking is an essential component of next generation displays for virtual reality and augmented reality applications. Traditional camera-based gaze trackers used in next generation displays are known to be lacking in one or multiple of the following metrics: power consumption, cost, computational complexity, estimation accuracy, latency, and form-factor. We propose the use of discrete photodiodes and light-emitting diodes (LEDs) as an alternative to traditional camera-based gaze tracking approaches while taking all of these metrics into consideration. We begin by developing a rendering-based simulation framework for understanding the relationship between light sources and a virtual model eyeball. Findings from this framework are used for the placement of LEDs and photodiodes. Our first prototype uses a neural network to obtain an average error rate of 2.67° at 400 Hz while demanding only 16 mW. By simplifying the implementation to using only LEDs, duplexed as light transceivers, and more minimal machine learning model, namely a light-weight supervised Gaussian process regression algorithm, we show that our second prototype is capable of an average error rate of 1.57° at 250 Hz using 800 mW.",
"fno": "850800a117",
"keywords": [
"Augmented Reality",
"Cameras",
"Gaussian Processes",
"Gaze Tracking",
"Learning Artificial Intelligence",
"Photodiodes",
"Regression Analysis",
"Rendering Computer Graphics",
"Optical Gaze Tracking",
"Spatially Sparse Single Pixel Detectors",
"Generation Displays",
"Virtual Reality",
"Reality Applications",
"Traditional Camera Based Gaze Trackers",
"Power Consumption",
"Computational Complexity",
"Estimation Accuracy",
"Form Factor",
"Discrete Photodiodes",
"Light Emitting Diodes",
"Traditional Camera Based Gaze Tracking",
"Rendering Based Simulation Framework",
"Light Sources",
"Virtual Model Eyeball",
"Average Error Rate",
"Light Transceivers",
"Light Weight Supervised Gaussian Process Regression Algorithm",
"Solid Modeling",
"Wearable Computers",
"Prototypes",
"Gaze Tracking",
"Light Emitting Diodes",
"Photodiodes",
"Augmented Reality",
"Human Centered Computing",
"Ubiquitous And Mobile Computing",
"Ubiquitous And Mobile Devices",
"Computer Systems Organization",
"Embedded And Cyber Physical Systems",
"Sensors And Actuators"
],
"authors": [
{
"affiliation": "University of Washington",
"fullName": "Richard Li",
"givenName": "Richard",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Washington",
"fullName": "Eric Whitmire",
"givenName": "Eric",
"surname": "Whitmire",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NVIDIA Research",
"fullName": "Michael Stengel",
"givenName": "Michael",
"surname": "Stengel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NVIDIA Research",
"fullName": "Ben Boudaoud",
"givenName": "Ben",
"surname": "Boudaoud",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NVIDIA Research",
"fullName": "Jan Kautz",
"givenName": "Jan",
"surname": "Kautz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NVIDIA Research",
"fullName": "David Luebke",
"givenName": "David",
"surname": "Luebke",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Washington",
"fullName": "Shwetak Patel",
"givenName": "Shwetak",
"surname": "Patel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NVIDIA Research",
"fullName": "Kaan Akşit",
"givenName": "Kaan",
"surname": "Akşit",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "117-126",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a109",
"articleId": "1pysuUuZCwM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a127",
"articleId": "1pysv4MOhNK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/waina/2016/2461/0/2461a878",
"title": "A Visible Light Communication Positioning Mechanism in Industrial Logistics Management",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2016/2461a878/12OmNvSKNVR",
"parentPublication": {
"id": "proceedings/waina/2016/2461/0",
"title": "2016 30th International Conference on Advanced Information Networking and Applications Workshops (WAINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2015/7905/0/7905a904",
"title": "Implementation of an Eye Gaze Tracking System for the Disabled People",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2015/7905a904/12OmNwEJ115",
"parentPublication": {
"id": "proceedings/aina/2015/7905/0",
"title": "2015 IEEE 29th International Conference on Advanced Information Networking and Applications (AINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoip/2010/4252/1/4252a131",
"title": "A Simplified 3D Gaze Tracking Technology with Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/icoip/2010/4252a131/12OmNwqft0F",
"parentPublication": {
"id": "proceedings/icoip/2010/4252/2",
"title": "Optoelectronics and Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2018/5395/0/539501a281",
"title": "A Robust CSI-HARQ MIMO Visible Light Communication Scheme for Wireless Industrial Networking",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2018/539501a281/12OmNxdm4EH",
"parentPublication": {
"id": "proceedings/waina/2018/5395/0",
"title": "2018 32nd International Conference on Advanced Information Networking and Applications Workshops (WAINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeae/2015/8328/0/07386224",
"title": "Visible Light Communication Applied on Vehicle-to-Vehicle Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icmeae/2015/07386224/12OmNy3149N",
"parentPublication": {
"id": "proceedings/icmeae/2015/8328/0",
"title": "2015 International Conference on Mechatronics, Electronics and Automotive Engineering (ICMEAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2015/9101/0/9101a235",
"title": "Shine: A Step Towards Distributed Multi-Hop Visible Light Communication",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2015/9101a235/12OmNyNQSBE",
"parentPublication": {
"id": "proceedings/mass/2015/9101/0",
"title": "2015 IEEE 12th International Conference on Mobile Ad Hoc and Sensor Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2013/3755/0/06755008",
"title": "MIMO-diversity switching techniques for digital transmission in visible light communication",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2013/06755008/12OmNzBOhGO",
"parentPublication": {
"id": "proceedings/iscc/2013/3755/0",
"title": "2013 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2002/1602/0/16020101",
"title": "Non-Contact Eye Gaze Tracking System by Mapping of Corneal Reflections",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2002/16020101/12OmNzgwmIY",
"parentPublication": {
"id": "proceedings/fg/2002/1602/0",
"title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a320",
"title": "Improved vergence and accommodation via Purkinje Image tracking with multiple cameras for AR glasses",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a320/1pysxaykIAo",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09389490",
"title": "Event-Based Near-Eye Gaze Tracking Beyond 10,000 Hz",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09389490/1smZT5W55V6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeQQvE3OQo",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00035",
"title": "Gaze-Adaptive Subtitles Considering the Balance among Vertical/Horizontal and Depth of Eye Movement",
"normalizedTitle": "Gaze-Adaptive Subtitles Considering the Balance among Vertical/Horizontal and Depth of Eye Movement",
"abstract": "Subtitles (captions displayed on the screen) are important in 3D content, such as virtual reality (VR) and 3D movies, to help users understand the content. However, an optimal displaying method and framework for subtitles have not been established for 3D content because 3D has a depth factor. To determine how to place text in 3D content, we propose four methods of moving subtitles dynamically considering the balance between the vertical/horizontal and depth of gaze shift. These methods are used to reduce the difference in depth or distance between the gaze position and subtitles. Additionally, we evaluate the readability of the text and participants’ fatigue. The results show that aligning the text horizontally and vertically to eye movements improves visibility and readability. It is also shown that the eyestrain is related to the distance between the object and subtitles. This evaluation provides basic knowledge for presenting text in 3D content.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Subtitles (captions displayed on the screen) are important in 3D content, such as virtual reality (VR) and 3D movies, to help users understand the content. However, an optimal displaying method and framework for subtitles have not been established for 3D content because 3D has a depth factor. To determine how to place text in 3D content, we propose four methods of moving subtitles dynamically considering the balance between the vertical/horizontal and depth of gaze shift. These methods are used to reduce the difference in depth or distance between the gaze position and subtitles. Additionally, we evaluate the readability of the text and participants’ fatigue. The results show that aligning the text horizontally and vertically to eye movements improves visibility and readability. It is also shown that the eyestrain is related to the distance between the object and subtitles. This evaluation provides basic knowledge for presenting text in 3D content.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Subtitles (captions displayed on the screen) are important in 3D content, such as virtual reality (VR) and 3D movies, to help users understand the content. However, an optimal displaying method and framework for subtitles have not been established for 3D content because 3D has a depth factor. To determine how to place text in 3D content, we propose four methods of moving subtitles dynamically considering the balance between the vertical/horizontal and depth of gaze shift. These methods are used to reduce the difference in depth or distance between the gaze position and subtitles. Additionally, we evaluate the readability of the text and participants’ fatigue. The results show that aligning the text horizontally and vertically to eye movements improves visibility and readability. It is also shown that the eyestrain is related to the distance between the object and subtitles. This evaluation provides basic knowledge for presenting text in 3D content.",
"fno": "129800a127",
"keywords": [
"Eye",
"Image Colour Analysis",
"Stereo Image Processing",
"Text Analysis",
"Video Signal Processing",
"Virtual Reality",
"Depth Factor",
"Gaze Shift",
"Gaze Position",
"Eye Movements",
"Gaze Adaptive Subtitles",
"Optimal Displaying Method",
"Three Dimensional Displays",
"Tracking",
"Games",
"Motion Pictures",
"Fatigue",
"Augmented Reality",
"Videos",
"Virtual Reality",
"Eye Tracking",
"Subtitles",
"Sensing"
],
"authors": [
{
"affiliation": "Grad. Sch. of Engineering Kobe University",
"fullName": "Yusuke Shimizu",
"givenName": "Yusuke",
"surname": "Shimizu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Grad. Sch. of Engineering Kobe University",
"fullName": "Ayumi Ohnishi",
"givenName": "Ayumi",
"surname": "Ohnishi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Grad. Sch. of Engineering Kobe University",
"fullName": "Tsutomu Terada",
"givenName": "Tsutomu",
"surname": "Terada",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Grad. Sch. of Engineering Kobe University",
"fullName": "Masahiko Tsukamoto",
"givenName": "Masahiko",
"surname": "Tsukamoto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "127-132",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeQPXwN1xC",
"name": "pismar-adjunct202112980-09585865s1-mm_129800a127.zip",
"size": "102 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar-adjunct202112980-09585865s1-mm_129800a127.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "129800a121",
"articleId": "1yeQzcCDmhy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a133",
"articleId": "1yeQzHQNgvC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iwsca/2008/3317/0/3317a166",
"title": "Automatic Subtitles Localization through Speaker Identification in Multimedia System",
"doi": null,
"abstractUrl": "/proceedings-article/iwsca/2008/3317a166/12OmNARRYCS",
"parentPublication": {
"id": "proceedings/iwsca/2008/3317/0",
"title": "Semantic Computing and Applications, IEEE International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06238904",
"title": "The measurement of eyestrain caused from diverse binocular disparities, viewing time and display sizes in watching stereoscopic 3D content",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06238904/12OmNqJHFuT",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2016/2535/0/2535a659",
"title": "Study on the Impact of the Depth of 3D Subtitles on Visual Comfort",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2016/2535a659/12OmNyvY9zI",
"parentPublication": {
"id": "proceedings/icisce/2016/2535/0",
"title": "2016 3rd International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a155",
"title": "[POSTER] MR TV Mozaik: A New Mixed Reality Interactive TV Experience",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a155/12OmNz3bdL7",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446058",
"title": "User Preference for SharpView-Enhanced Virtual Text During Non-Fixated Viewing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446058/13bd1ftOBCG",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010125",
"title": "Text Readability in Head-Worn Displays: Color and Style Optimization in Video versus Optical See-Through Devices",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010125/13rRUxNEqPT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icime/2018/7616/0/761600a130",
"title": "The Impacts of Subtitles on 360-Degree Video Journalism Watching",
"doi": null,
"abstractUrl": "/proceedings-article/icime/2018/761600a130/17D45VsBU1i",
"parentPublication": {
"id": "proceedings/icime/2018/7616/0",
"title": "2018 International Joint Conference on Information, Media and Engineering (ICIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1532",
"title": "Aligning Subtitles in Sign Language Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1532/1BmGBqSqxsA",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a786",
"title": "An Examination on Reduction of Displayed Character Shake while Walking in Place with AR Glasses",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a786/1CJf8OTaee4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a649",
"title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a649/1pysvKFdazS",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxX3uMV",
"title": "2011 Frontiers in Education Conference (FIE)",
"acronym": "fie",
"groupId": "1000297",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCfjeoS",
"doi": "10.1109/FIE.2011.6143035",
"title": "Problematizations of women's underrepresentation: Comparing educator interviews with the literature",
"normalizedTitle": "Problematizations of women's underrepresentation: Comparing educator interviews with the literature",
"abstract": "Various arguments for increasing diversity and the presence of underrepresented groups in engineering have been put forth. However, little attention has been paid to those arguments themselves or their implications. The goal of this paper is to call attention to the need for further reflection upon and analysis of how engineering educators understand and discuss underrepresentation and diversity. Specifically, it examines the motivations of a diverse group of engineering educators to undertake work on feminist engineering education initiatives. It builds on prior scholarship from the field of Science and Technology Studies (STS) of the ways in which underrepresentation has been framed as a problem. Participants' responses to the question of why underrepresentation is a problem are quoted at length and discussed. They are compared with prior findings from an analysis of the framings found in engineering education publications. Several differences between the publications and interview data are identified and the implications of these findings for engineering education, engineering education research, underrepresentation, and diversity more broadly, are then discussed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Various arguments for increasing diversity and the presence of underrepresented groups in engineering have been put forth. However, little attention has been paid to those arguments themselves or their implications. The goal of this paper is to call attention to the need for further reflection upon and analysis of how engineering educators understand and discuss underrepresentation and diversity. Specifically, it examines the motivations of a diverse group of engineering educators to undertake work on feminist engineering education initiatives. It builds on prior scholarship from the field of Science and Technology Studies (STS) of the ways in which underrepresentation has been framed as a problem. Participants' responses to the question of why underrepresentation is a problem are quoted at length and discussed. They are compared with prior findings from an analysis of the framings found in engineering education publications. Several differences between the publications and interview data are identified and the implications of these findings for engineering education, engineering education research, underrepresentation, and diversity more broadly, are then discussed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Various arguments for increasing diversity and the presence of underrepresented groups in engineering have been put forth. However, little attention has been paid to those arguments themselves or their implications. The goal of this paper is to call attention to the need for further reflection upon and analysis of how engineering educators understand and discuss underrepresentation and diversity. Specifically, it examines the motivations of a diverse group of engineering educators to undertake work on feminist engineering education initiatives. It builds on prior scholarship from the field of Science and Technology Studies (STS) of the ways in which underrepresentation has been framed as a problem. Participants' responses to the question of why underrepresentation is a problem are quoted at length and discussed. They are compared with prior findings from an analysis of the framings found in engineering education publications. Several differences between the publications and interview data are identified and the implications of these findings for engineering education, engineering education research, underrepresentation, and diversity more broadly, are then discussed.",
"fno": "06143035",
"keywords": [
"Engineering Education",
"Gender Issues",
"Women Underrepresentation",
"Diversity",
"Motivations",
"Feminist Engineering Education",
"Science And Technology Studies",
"Publications",
"Interview",
"Engineering Education",
"Economics",
"Interviews",
"Cultural Differences",
"Communities",
"Conferences",
"Discourse",
"Diversity",
"Feminism",
"Social Justice",
"Underrepresentation"
],
"authors": [
{
"affiliation": "Virginia Tech",
"fullName": "Kacey Beddoes",
"givenName": "Kacey",
"surname": "Beddoes",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-10-01T00:00:00",
"pubType": "proceedings",
"pages": "F4H-1-F4H-6",
"year": "2011",
"issn": "0190-5848",
"isbn": "978-1-61284-468-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06143034",
"articleId": "12OmNwBjP7j",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06143036",
"articleId": "12OmNxWcHo6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2015/8454/0/07344119",
"title": "Engineering self-efficacy, interactions with faculty, and other forms of capital for underrepresented engineering students",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2015/07344119/12OmNBcAGKG",
"parentPublication": {
"id": "proceedings/fie/2015/8454/0",
"title": "2015 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2014/3922/0/07044017",
"title": "Exploring conceptual understanding and personal epistemologies through metaphor",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2014/07044017/12OmNCxtyMk",
"parentPublication": {
"id": "proceedings/fie/2014/3922/0",
"title": "2014 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2016/1790/0/07757565",
"title": "Exploring student motivation towards diversity education in engineering",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2016/07757565/12OmNvSKNBZ",
"parentPublication": {
"id": "proceedings/fie/2016/1790/0",
"title": "2016 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2018/5725/0/572501a045",
"title": "Diversity in Software Engineering",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2018/572501a045/13bd1fph1yY",
"parentPublication": {
"id": "proceedings/chase/2018/5725/0",
"title": "2018 IEEE/ACM 11th International Workshop on Cooperative and Human Aspects of Software Engineering (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/sp/2017/01/msp2017010066",
"title": "Security & Privacy Week Interviews, Part 3",
"doi": null,
"abstractUrl": "/magazine/sp/2017/01/msp2017010066/13rRUwInvqs",
"parentPublication": {
"id": "mags/sp",
"title": "IEEE Security & Privacy",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08659044",
"title": "Exploring Diversity and Inclusion in the Professional Formation of Engineers through Design Sessions",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08659044/18j95SVvrVK",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08659051",
"title": "The Structure of Change: A Content Analysis of Engineering Diversity Plans and Mission Statements",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08659051/18j9k795jzO",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08658478",
"title": "Engineering Gender Identities of Women in a Service-Learning Context",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08658478/18j9oVoFQaI",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2019/1746/0/09028677",
"title": "Just Deserts: Engineering for All, Everywhere",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2019/09028677/1iffvZgM5Es",
"parentPublication": {
"id": "proceedings/fie/2019/1746/0",
"title": "2019 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2021/3851/0/09637422",
"title": "Student-Faculty Interactions to Promote Equity in Engineering",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2021/09637422/1zuvIt7Va1i",
"parentPublication": {
"id": "proceedings/fie/2021/3851/0",
"title": "2021 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXDI2lhHq",
"doi": "10.1109/VRW52623.2021.00250",
"title": "[DC] Towards Universal VR Sickness Mitigation Strategies",
"normalizedTitle": "[DC] Towards Universal VR Sickness Mitigation Strategies",
"abstract": "Although various issues related to VR hardware and virtual content have been identified as likely causes of VR sickness, human factors, such as age, gender and personal susceptibility to VR sickness, determine whether a user will experience VR sickness or how severe their symptoms will be. However, current VR sickness mitigation or measurement techniques do not take these human factors into consideration. This doctoral thesis aims to identify and quantify the human factors that cause difference in susceptibility to VR sickness among different demographic user groups.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although various issues related to VR hardware and virtual content have been identified as likely causes of VR sickness, human factors, such as age, gender and personal susceptibility to VR sickness, determine whether a user will experience VR sickness or how severe their symptoms will be. However, current VR sickness mitigation or measurement techniques do not take these human factors into consideration. This doctoral thesis aims to identify and quantify the human factors that cause difference in susceptibility to VR sickness among different demographic user groups.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although various issues related to VR hardware and virtual content have been identified as likely causes of VR sickness, human factors, such as age, gender and personal susceptibility to VR sickness, determine whether a user will experience VR sickness or how severe their symptoms will be. However, current VR sickness mitigation or measurement techniques do not take these human factors into consideration. This doctoral thesis aims to identify and quantify the human factors that cause difference in susceptibility to VR sickness among different demographic user groups.",
"fno": "405700a735",
"keywords": [
"Human Factors",
"User Experience",
"Virtual Reality",
"Virtual Content",
"Human Factors",
"Measurement Techniques",
"Universal VR Sickness Mitigation Strategies",
"VR Hardware",
"Age",
"Gender",
"Personal Susceptibility",
"User Experience",
"Human Computer Interaction",
"Three Dimensional Displays",
"Navigation",
"Conferences",
"Human Factors",
"Virtual Reality",
"Medical Services",
"Human Centered Computing Virtual Reality"
],
"authors": [
{
"affiliation": "University of Nevada,Department of Computer Science and Engineering,Reno",
"fullName": "Isayas Berhe Adhanom",
"givenName": "Isayas Berhe",
"surname": "Adhanom",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "735-736",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a733",
"articleId": "1tnXLofRauc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a737",
"articleId": "1tnWN2HrHuU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446346",
"title": "Reducing VR Sickness Through Peripheral Visual Effects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446346/13bd1fHrlRY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2022/02/09779506",
"title": "Why VR Games Sickness? An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset",
"doi": null,
"abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09793626",
"title": "Mitigation of VR Sickness during Locomotion with a Motion-Based Dynamic Vision Modulator",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09793626/1E5LEepCqTC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a169",
"title": "Development of VR Motion Sickness Test Platform Based on UE",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a169/1H2pqPKjkAg",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a328",
"title": "Exploring Neural Biomarkers in Young Adults Resistant to VR Motion Sickness: A Pilot Study of EEG",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a328/1MNgLSkIsUw",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a094",
"title": "An EEG-based Experiment on VR Sickness and Postural Instability While Walking in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a094/1MNgWtYsR5S",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798136",
"title": "VR Sickness in Continuous Exposure to Live-action 180°Video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798136/1cJ1gPJX2og",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089437",
"title": "The Effect of a Foveated Field-of-view Restrictor on VR Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089437/1jIxcfT0Wt2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412423",
"title": "VR Sickness Assessment with Perception Prior and Hybrid Temporal Features",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412423/1tmiMP82mre",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuB6Ibu8j6",
"doi": "10.1109/VR50410.2021.00095",
"title": "Who Are Virtual Reality Headset Owners? A Survey and Comparison of Headset Owners and Non-Owners",
"normalizedTitle": "Who Are Virtual Reality Headset Owners? A Survey and Comparison of Headset Owners and Non-Owners",
"abstract": "The number of people who own a virtual reality (VR) head-mounted display (HMD) has reached a point where researchers can readily recruit HMD owners to participate remotely using their own equipment. However, HMD owners recruited online may differ from the university community members who typically participate in VR research. HMD owners (n=220) and non-owners (n=282) were recruited through two online work sites-Amazon's Mechanical Turk and Prolific-and an undergraduate participant pool. Participants completed a survey in which they provided demographic information and completed measures of HMD use, video game use, spatial ability, and motion sickness susceptibility. In the context of the populations sampled, the results provide 1) a characterization of HMD owners, 2) a snapshot of the most commonly owned HMDs, 3) a comparison between HMD owners and non-owners, and 4) a comparison among online workers and undergraduates. Significant gender differences were found: men reported lower motion sickness susceptibility and more video game hours than women, and men outperformed women on spatial tasks. Men comprised a greater proportion of HMD owners than non-owners, but after accounting for this imbalance, HMD owners did not differ appreciably from non-owners. Comparing across recruitment platform, male undergraduates outperformed male online workers on spatial tests, and female undergraduates played fewer video game hours than female online workers. The data removal rate was higher from Amazon compared to Prolific, possibly reflecting greater dishonesty. These results provide a description of HMD users that can inform researchers recruiting remote participants through online work sites. These results also signal a need for caution when comparing in-person VR research that primarily enrolls undergraduates to online VR research that enrolls online workers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The number of people who own a virtual reality (VR) head-mounted display (HMD) has reached a point where researchers can readily recruit HMD owners to participate remotely using their own equipment. However, HMD owners recruited online may differ from the university community members who typically participate in VR research. HMD owners (n=220) and non-owners (n=282) were recruited through two online work sites-Amazon's Mechanical Turk and Prolific-and an undergraduate participant pool. Participants completed a survey in which they provided demographic information and completed measures of HMD use, video game use, spatial ability, and motion sickness susceptibility. In the context of the populations sampled, the results provide 1) a characterization of HMD owners, 2) a snapshot of the most commonly owned HMDs, 3) a comparison between HMD owners and non-owners, and 4) a comparison among online workers and undergraduates. Significant gender differences were found: men reported lower motion sickness susceptibility and more video game hours than women, and men outperformed women on spatial tasks. Men comprised a greater proportion of HMD owners than non-owners, but after accounting for this imbalance, HMD owners did not differ appreciably from non-owners. Comparing across recruitment platform, male undergraduates outperformed male online workers on spatial tests, and female undergraduates played fewer video game hours than female online workers. The data removal rate was higher from Amazon compared to Prolific, possibly reflecting greater dishonesty. These results provide a description of HMD users that can inform researchers recruiting remote participants through online work sites. These results also signal a need for caution when comparing in-person VR research that primarily enrolls undergraduates to online VR research that enrolls online workers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The number of people who own a virtual reality (VR) head-mounted display (HMD) has reached a point where researchers can readily recruit HMD owners to participate remotely using their own equipment. However, HMD owners recruited online may differ from the university community members who typically participate in VR research. HMD owners (n=220) and non-owners (n=282) were recruited through two online work sites-Amazon's Mechanical Turk and Prolific-and an undergraduate participant pool. Participants completed a survey in which they provided demographic information and completed measures of HMD use, video game use, spatial ability, and motion sickness susceptibility. In the context of the populations sampled, the results provide 1) a characterization of HMD owners, 2) a snapshot of the most commonly owned HMDs, 3) a comparison between HMD owners and non-owners, and 4) a comparison among online workers and undergraduates. Significant gender differences were found: men reported lower motion sickness susceptibility and more video game hours than women, and men outperformed women on spatial tasks. Men comprised a greater proportion of HMD owners than non-owners, but after accounting for this imbalance, HMD owners did not differ appreciably from non-owners. Comparing across recruitment platform, male undergraduates outperformed male online workers on spatial tests, and female undergraduates played fewer video game hours than female online workers. The data removal rate was higher from Amazon compared to Prolific, possibly reflecting greater dishonesty. These results provide a description of HMD users that can inform researchers recruiting remote participants through online work sites. These results also signal a need for caution when comparing in-person VR research that primarily enrolls undergraduates to online VR research that enrolls online workers.",
"fno": "255600a687",
"keywords": [
"Computer Aided Instruction",
"Computer Games",
"Educational Institutions",
"Gender Issues",
"Helmet Mounted Displays",
"Human Factors",
"Internet",
"Recruitment",
"Virtual Reality",
"Virtual Reality Headset Owners",
"Nonowners",
"HMD Owners",
"Headphones",
"Three Dimensional Displays",
"Sociology",
"Resists",
"Virtual Reality",
"Games",
"User Interfaces",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality"
],
"authors": [
{
"affiliation": "Iowa State University",
"fullName": "Jonathan W. Kelly",
"givenName": "Jonathan W.",
"surname": "Kelly",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University",
"fullName": "Lucia A. Cherep",
"givenName": "Lucia A.",
"surname": "Cherep",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University",
"fullName": "Alex F. Lim",
"givenName": "Alex F.",
"surname": "Lim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University",
"fullName": "Taylor Doty",
"givenName": "Taylor",
"surname": "Doty",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Iowa State University",
"fullName": "Stephen B. Gilbert",
"givenName": "Stephen B.",
"surname": "Gilbert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "687-694",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "255600a679",
"articleId": "1tuAQT1z2EM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a695",
"articleId": "1tuAf9n910Q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892375",
"title": "Experiencing guidance in 3D spaces with a vibrotactile head-mounted display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892375/12OmNy5hRo2",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446524",
"title": "HangerOVER: Development of HMO-Embedded Haptic Display Using the Hanger Reflex and VR Application",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446524/13bd1fdV4l2",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/03/08481564",
"title": "Towards BCI-Based Interfaces for Augmented Reality: Feasibility, Design and Evaluation",
"doi": null,
"abstractUrl": "/journal/tg/2020/03/08481564/146z4OQdyi9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699267",
"title": "Perception and Action in Peripersonal Space: A Comparison Between Video and Optical See-Through Augmented Reality Devices",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699267/19F1NuzXn9u",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a666",
"title": "If I Share with you my Perspective, Would you Share your Data with me?",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a666/1CJcFhW6P6M",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2022/5908/0/09953891",
"title": "AVDOS - Affective Video Database Online Study Video database for affective research emotionally validated through an online survey",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2022/09953891/1IAK6fTXzdm",
"parentPublication": {
"id": "proceedings/acii/2022/5908/0",
"title": "2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090580",
"title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090421",
"title": "Analysis of Interaction Spaces for VR in Public Transport Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090421/1jIxr9dj52o",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a412",
"title": "Seamless, Bi-directional Transitions along the Reality-Virtuality Continuum: A Conceptualization and Prototype Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a412/1pyswqrEtCE",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a759",
"title": "Turning a Messy Room into a Fully Immersive VR Playground",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a759/1tnXiK8j7fq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1ziP833pXeE",
"title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)",
"acronym": "iccst",
"groupId": "1838984",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1ziPjdcz2Mg",
"doi": "10.1109/ICCST53801.2021.00095",
"title": "Evaluation of Height and Speed Effects on the Comfort of VR Motion Picture Display",
"normalizedTitle": "Evaluation of Height and Speed Effects on the Comfort of VR Motion Picture Display",
"abstract": "Virtual Reality (VR) technology has become ever more popular with consumers in recent years. However, dizziness brought by VR greatly affects its experience, especially VR motion sickness. Therefore, the industry needs in order to study how to improve the comfort of VR motion pictures. The subject conducts subjective evaluation experiments through the constructed virtual 3D scene. The relationship between the height and speed of the VR camera, as well as the gender of the audience and VR motion sickness, is considered by subjective evaluation experiments. Viewer’s VR motion sickness is the least obvious when the viewer is close to the height and average pace, and women are more likely to be dizzy to VR than men, which are proved by experimental results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality (VR) technology has become ever more popular with consumers in recent years. However, dizziness brought by VR greatly affects its experience, especially VR motion sickness. Therefore, the industry needs in order to study how to improve the comfort of VR motion pictures. The subject conducts subjective evaluation experiments through the constructed virtual 3D scene. The relationship between the height and speed of the VR camera, as well as the gender of the audience and VR motion sickness, is considered by subjective evaluation experiments. Viewer’s VR motion sickness is the least obvious when the viewer is close to the height and average pace, and women are more likely to be dizzy to VR than men, which are proved by experimental results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality (VR) technology has become ever more popular with consumers in recent years. However, dizziness brought by VR greatly affects its experience, especially VR motion sickness. Therefore, the industry needs in order to study how to improve the comfort of VR motion pictures. The subject conducts subjective evaluation experiments through the constructed virtual 3D scene. The relationship between the height and speed of the VR camera, as well as the gender of the audience and VR motion sickness, is considered by subjective evaluation experiments. Viewer’s VR motion sickness is the least obvious when the viewer is close to the height and average pace, and women are more likely to be dizzy to VR than men, which are proved by experimental results.",
"fno": "425400a426",
"keywords": [
"Computer Displays",
"Image Motion Analysis",
"Solid Modelling",
"Virtual Reality",
"Subjective Evaluation Experiments",
"Constructed Virtual 3 D Scene",
"VR Camera",
"VR Motion Sickness",
"Viewer",
"Average Pace",
"Speed Effects",
"Vr Motion Picture Display",
"Virtual Reality Technology",
"VR Motion Pictures",
"Industries",
"Three Dimensional Displays",
"Virtual Reality",
"Motion Pictures",
"Cameras",
"Videos",
"Virtual Reality",
"Motion Sickness",
"Unity 3 D",
"Subjective Evaluation"
],
"authors": [
{
"affiliation": "Communication University of China,Information Engineering,Beijing,China",
"fullName": "Tianyi Cui",
"givenName": "Tianyi",
"surname": "Cui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Communication University of China,Information Engineering,Beijing,China",
"fullName": "Yu Yang",
"givenName": "Yu",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Communication University of China,State Key Laboratory of Media Convergence and Communication,Beijing,China",
"fullName": "Yunhai Guo",
"givenName": "Yunhai",
"surname": "Guo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccst",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-11-01T00:00:00",
"pubType": "proceedings",
"pages": "426-430",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4254-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "425400a422",
"articleId": "1ziPpAlC6eA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "425400a431",
"articleId": "1ziPpbBFNjW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/taai/2017/4203/0/4203a076",
"title": "Whitewater Slalom Pseudo Experience Device using 3-DOF Motion Base and VR Goggles",
"doi": null,
"abstractUrl": "/proceedings-article/taai/2017/4203a076/12OmNBOll7A",
"parentPublication": {
"id": "proceedings/taai/2017/4203/0",
"title": "2017 Conference on Technologies and Applications of Artificial Intelligence (TAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446144",
"title": "Redirected Scene Rotation for Immersive Movie Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446144/13bd1fHrlRD",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699194",
"title": "Comfort Intelligence for Autonomous Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699194/19F1NbD5DMs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2022/02/09779506",
"title": "Why VR Games Sickness? An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset",
"doi": null,
"abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a169",
"title": "Development of VR Motion Sickness Test Platform Based on UE",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a169/1H2pqPKjkAg",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a328",
"title": "Exploring Neural Biomarkers in Young Adults Resistant to VR Motion Sickness: A Pilot Study of EEG",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a328/1MNgLSkIsUw",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798136",
"title": "VR Sickness in Continuous Exposure to Live-action 180°Video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798136/1cJ1gPJX2og",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412423",
"title": "VR Sickness Assessment with Perception Prior and Hybrid Temporal Features",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412423/1tmiMP82mre",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a735",
"title": "[DC] Towards Universal VR Sickness Mitigation Strategies",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a735/1tnXDI2lhHq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1pBMeBWXAZ2",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pBMkbxS3F6",
"doi": "10.1109/ISMAR-Adjunct51615.2020.00059",
"title": "Evaluate Optimal Redirected Walking Planning Using Reinforcement Learning",
"normalizedTitle": "Evaluate Optimal Redirected Walking Planning Using Reinforcement Learning",
"abstract": "Redirected Walking (RDW) is commonly used to overcome the limitation of real walking locomotion while exploring virtual worlds. Although a few machine learning-based RDW algorithm is proposed, most of the system did not go through live user evaluation. In this work, we evaluated a novel RDW controller proposed by Chang et al., in which the formatted steering rule is replaced with reinforcement learning(RL), by simulation and live user experiment. We found the RL-based RDW controller reduced boundary collisions significantly in both simulation and user study comparing to the heuristic algorithm, Steer-to-Center(S2C); also, there are no noticeable differences in immersiveness. These results indicate that the novel controller is superior to the heuristic method. Furthermore, as we conducted experiments in a relatively simple space and still outperformed the heuristic method, we are optimistic that the RL-based controller can maintain the high-performance in complicated scenarios in the future.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected Walking (RDW) is commonly used to overcome the limitation of real walking locomotion while exploring virtual worlds. Although a few machine learning-based RDW algorithm is proposed, most of the system did not go through live user evaluation. In this work, we evaluated a novel RDW controller proposed by Chang et al., in which the formatted steering rule is replaced with reinforcement learning(RL), by simulation and live user experiment. We found the RL-based RDW controller reduced boundary collisions significantly in both simulation and user study comparing to the heuristic algorithm, Steer-to-Center(S2C); also, there are no noticeable differences in immersiveness. These results indicate that the novel controller is superior to the heuristic method. Furthermore, as we conducted experiments in a relatively simple space and still outperformed the heuristic method, we are optimistic that the RL-based controller can maintain the high-performance in complicated scenarios in the future.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected Walking (RDW) is commonly used to overcome the limitation of real walking locomotion while exploring virtual worlds. Although a few machine learning-based RDW algorithm is proposed, most of the system did not go through live user evaluation. In this work, we evaluated a novel RDW controller proposed by Chang et al., in which the formatted steering rule is replaced with reinforcement learning(RL), by simulation and live user experiment. We found the RL-based RDW controller reduced boundary collisions significantly in both simulation and user study comparing to the heuristic algorithm, Steer-to-Center(S2C); also, there are no noticeable differences in immersiveness. These results indicate that the novel controller is superior to the heuristic method. Furthermore, as we conducted experiments in a relatively simple space and still outperformed the heuristic method, we are optimistic that the RL-based controller can maintain the high-performance in complicated scenarios in the future.",
"fno": "767500a201",
"keywords": [
"Control Engineering Computing",
"Gait Analysis",
"Learning Artificial Intelligence",
"Metaheuristics",
"Path Planning",
"Virtual Reality",
"Walking Locomotion",
"Virtual Worlds",
"RDW Algorithm",
"Live User Experiment",
"RL Based RDW Controller",
"Boundary Collisions",
"Heuristic Algorithm",
"Steer To Center",
"Optimal Redirected Walking Planning",
"Reinforcement Learning",
"Legged Locomotion",
"Machine Learning Algorithms",
"Heuristic Algorithms",
"Computational Modeling",
"Reinforcement Learning",
"Planning",
"Task Analysis",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "National Cheng Kung University,Tainan,Taiwan",
"fullName": "TsaiYen Ko",
"givenName": "TsaiYen",
"surname": "Ko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Taiwan University,Taipei,Taiwan",
"fullName": "LiWen Su",
"givenName": "LiWen",
"surname": "Su",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo,Tokyo,Japan",
"fullName": "Yuchen Chang",
"givenName": "Yuchen",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo,Tokyo,Japan",
"fullName": "Keigo Matsumoto",
"givenName": "Keigo",
"surname": "Matsumoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo,Tokyo,Japan",
"fullName": "Takuji Narumi",
"givenName": "Takuji",
"surname": "Narumi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo,Tokyo,Japan",
"fullName": "Michitaka Hirose",
"givenName": "Michitaka",
"surname": "Hirose",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "201-202",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7675-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "767500a195",
"articleId": "1pBMhHIpXdm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "767500a203",
"articleId": "1pBMigKK7F6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504742",
"title": "Simultaneous mapping and redirected walking for ad hoc free walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504742/12OmNyUFg0I",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09733261",
"title": "One-step out-of-place resetting for redirected walking in VR",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09733261/1BENJyPkx5S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a922",
"title": "Robust Redirected Walking in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a922/1CJfaCP53nq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09785918",
"title": "Redirected Walking for Exploring Immersive Virtual Spaces with HMD: A Comprehensive Review and Recent Advances",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09785918/1DPaEdHg6KQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998570",
"title": "A Steering Algorithm for Redirected Walking Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998570/1hx2DxYanDy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089532",
"title": "Optimal Planning for Redirected Walking Based on Reinforcement Learning in Multi-user Environment with Irregularly Shaped Physical Space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089532/1jIx7m6wYKc",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a184",
"title": "A Reinforcement Learning Approach to Redirected Walking with Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a184/1yeCXhKVTXy",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1gejsg2Q",
"doi": "10.1109/VR.2019.8797797",
"title": "Working Memory Load Performance Based on Collocation of Virtual and Physical Hands",
"normalizedTitle": "Working Memory Load Performance Based on Collocation of Virtual and Physical Hands",
"abstract": "The use of real-like hands in virtual reality simulators is common; however, research into understanding how the human brain perceives hands in virtual environments is limited. Self avatars are a great way to improve the users presence and perception of space [6], but the precise implementation of avatars is arduous, and including only hands is an attractive alternative. Earlier psychology research reported that the closer the hands to the studied object, the lower the working memory load. We hypothesized that in virtual environments, virtual hands that are collocated with the users hands should improve the users working memory load, and we tested our hypothesis with a between-participant study (n=30) measuring working memory load with the Stroop Interference Task.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The use of real-like hands in virtual reality simulators is common; however, research into understanding how the human brain perceives hands in virtual environments is limited. Self avatars are a great way to improve the users presence and perception of space [6], but the precise implementation of avatars is arduous, and including only hands is an attractive alternative. Earlier psychology research reported that the closer the hands to the studied object, the lower the working memory load. We hypothesized that in virtual environments, virtual hands that are collocated with the users hands should improve the users working memory load, and we tested our hypothesis with a between-participant study (n=30) measuring working memory load with the Stroop Interference Task.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The use of real-like hands in virtual reality simulators is common; however, research into understanding how the human brain perceives hands in virtual environments is limited. Self avatars are a great way to improve the users presence and perception of space [6], but the precise implementation of avatars is arduous, and including only hands is an attractive alternative. Earlier psychology research reported that the closer the hands to the studied object, the lower the working memory load. We hypothesized that in virtual environments, virtual hands that are collocated with the users hands should improve the users working memory load, and we tested our hypothesis with a between-participant study (n=30) measuring working memory load with the Stroop Interference Task.",
"fno": "08797797",
"keywords": [
"Avatars",
"Brain",
"Neurophysiology",
"Working Memory Load Performance",
"Physical Hands",
"Virtual Reality Simulators",
"Virtual Environments",
"Self Avatars",
"Virtual Hands",
"Human Brain",
"User Presence",
"User Hands",
"Psychology Research",
"Task Analysis",
"Interference",
"Avatars",
"Psychology",
"Color",
"Virtual Environments",
"Virtual Reality",
"Self Avatars",
"Virtual Hands",
"Working Memory",
"Stroop Interference Task"
],
"authors": [
{
"affiliation": "Davidson College",
"fullName": "Altan Tutar",
"givenName": "Altan",
"surname": "Tutar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Davidson College",
"fullName": "Tabitha Peck",
"givenName": "Tabitha",
"surname": "Peck",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1197-1198",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797847",
"articleId": "1cJ1dcfUZaM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797973",
"articleId": "1cJ0S2MS49O",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223428",
"title": "Aughanded Virtuality — The hands in the virtual environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223428/12OmNqBKTYf",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948466",
"title": "[Poster] Interacting with your own hands in a fully immersive MR system",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948466/12OmNrMHOkY",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504729",
"title": "Influence by others' opinions: Social pressure from agents in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504729/12OmNs5rkT8",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504689",
"title": "The impact of a self-avatar on cognitive load in immersive virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504689/12OmNviHKla",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131748",
"title": "Aughanded Virtuality - the hands in the virtual environment",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131748/12OmNx5piUz",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549379",
"title": "Head motion animation using avatar gaze space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260949",
"title": "The Effect of Gender Body-Swap Illusions on Working Memory and Stereotype Threat",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260949/13rRUynHujg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/07/08952604",
"title": "Effect of Avatar Appearance on Detection Thresholds for Remapped Hand Movements",
"doi": null,
"abstractUrl": "/journal/tg/2021/07/08952604/1gqqhpSZ19S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998371",
"title": "The Impact of a Self-Avatar, Hand Collocation, and Hand Proximity on Embodiment and Stroop Interference",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998371/1hrXiia6v9C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09495125",
"title": "Being an Avatar “for Real”: A Survey on Virtual Embodiment in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09495125/1vyju4jl6AE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1MNgk3BHlS0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1MNgQoZswDu",
"doi": "10.1109/VR55154.2023.00036",
"title": "Power, Performance, and Image Quality Tradeoffs in Foveated Rendering",
"normalizedTitle": "Power, Performance, and Image Quality Tradeoffs in Foveated Rendering",
"abstract": "Extended reality (XR) devices, including augmented, virtual, and mixed reality, provide a deeply immersive experience. However, practical limitations like weight, heat, and comfort put extreme constraints on the performance, power consumption, and image quality of such systems. In this paper, we study how these constraints form the tradeoff between Fixed Foveated Rendering (FFR), Gaze-Tracked Foveated Rendering (TFR), and conventional, non-foveated rendering. While existing papers have often studied these methods, we provide the first comprehensive study of their relative feasibility in practical systems with limited battery life and computational budget. We show that TFR with the added cost of the gaze-tracker can often be more expensive than FFR. Thus, we co-design a gaze-tracked foveated renderer considering its benefits in computation, power efficiency, and tradeoffs in image quality. We describe principled approximations for eye tracking which provide up to a 9x speedup in runtime performance with approximately a 20x improvement in energy efficiency when run on a mobile GPU. In isolation, these approximations appear to significantly degrade the gaze quality, but appropriate compensation in the visual pipeline can mitigate the loss. Overall, we show that with a highly optimized gaze-tracker, TFR is feasible compared to FFR, resulting in up to 1.25x faster frame times while also reducing total energy consumption by over 40%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Extended reality (XR) devices, including augmented, virtual, and mixed reality, provide a deeply immersive experience. However, practical limitations like weight, heat, and comfort put extreme constraints on the performance, power consumption, and image quality of such systems. In this paper, we study how these constraints form the tradeoff between Fixed Foveated Rendering (FFR), Gaze-Tracked Foveated Rendering (TFR), and conventional, non-foveated rendering. While existing papers have often studied these methods, we provide the first comprehensive study of their relative feasibility in practical systems with limited battery life and computational budget. We show that TFR with the added cost of the gaze-tracker can often be more expensive than FFR. Thus, we co-design a gaze-tracked foveated renderer considering its benefits in computation, power efficiency, and tradeoffs in image quality. We describe principled approximations for eye tracking which provide up to a 9x speedup in runtime performance with approximately a 20x improvement in energy efficiency when run on a mobile GPU. In isolation, these approximations appear to significantly degrade the gaze quality, but appropriate compensation in the visual pipeline can mitigate the loss. Overall, we show that with a highly optimized gaze-tracker, TFR is feasible compared to FFR, resulting in up to 1.25x faster frame times while also reducing total energy consumption by over 40%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Extended reality (XR) devices, including augmented, virtual, and mixed reality, provide a deeply immersive experience. However, practical limitations like weight, heat, and comfort put extreme constraints on the performance, power consumption, and image quality of such systems. In this paper, we study how these constraints form the tradeoff between Fixed Foveated Rendering (FFR), Gaze-Tracked Foveated Rendering (TFR), and conventional, non-foveated rendering. While existing papers have often studied these methods, we provide the first comprehensive study of their relative feasibility in practical systems with limited battery life and computational budget. We show that TFR with the added cost of the gaze-tracker can often be more expensive than FFR. Thus, we co-design a gaze-tracked foveated renderer considering its benefits in computation, power efficiency, and tradeoffs in image quality. We describe principled approximations for eye tracking which provide up to a 9x speedup in runtime performance with approximately a 20x improvement in energy efficiency when run on a mobile GPU. In isolation, these approximations appear to significantly degrade the gaze quality, but appropriate compensation in the visual pipeline can mitigate the loss. Overall, we show that with a highly optimized gaze-tracker, TFR is feasible compared to FFR, resulting in up to 1.25x faster frame times while also reducing total energy consumption by over 40%.",
"fno": "481500a205",
"keywords": [
"Image Quality",
"Performance Evaluation",
"Visualization",
"Three Dimensional Displays",
"Runtime",
"Power Demand",
"Pipelines",
"Human Centered Computing X 2014 Visualization X 2014 Visualization Techniques X 2014 Treemaps",
"Human Centered Computing X 2014 Visualization X 2014 Visualization Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "University of Illinois Urbana-Champaign",
"fullName": "Rahul Singh",
"givenName": "Rahul",
"surname": "Singh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Illinois Urbana-Champaign",
"fullName": "Muhammad Huzaifa",
"givenName": "Muhammad",
"surname": "Huzaifa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Illinois Urbana-Champaign",
"fullName": "Jeffrey Liu",
"givenName": "Jeffrey",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NVIDIA",
"fullName": "Anjul Patney",
"givenName": "Anjul",
"surname": "Patney",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Illinois Urbana-Champaign",
"fullName": "Hashim Sharif",
"givenName": "Hashim",
"surname": "Sharif",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Illinois Urbana-Champaign",
"fullName": "Yifan Zhao",
"givenName": "Yifan",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Illinois Urbana-Champaign",
"fullName": "Sarita Adve",
"givenName": "Sarita",
"surname": "Adve",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-03-01T00:00:00",
"pubType": "proceedings",
"pages": "205-214",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4815-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1MNgQezpuAE",
"name": "pvr202348150-010108084s1-mm_481500a205.zip",
"size": "90.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108084s1-mm_481500a205.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "481500a194",
"articleId": "1MNgFdvZLna",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "481500a215",
"articleId": "1MNgpToB1Cg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2018/4195/0/08551511",
"title": "Eye Tracking-Based 360 Vr Foveated/Tiled Video Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551511/17D45WK5AoH",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a756",
"title": "Rectangular Mapping-based Foveated Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a756/1CJcj9wHjH2",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903564",
"title": "FoVolNet: Fast Volume Rendering using Foveated Deep Neural Networks",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903564/1GZombIreEg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873853",
"title": "Foveated Stochastic Lightcuts",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873853/1GjwMIuxYUE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a471",
"title": "Locomotion-aware Foveated Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a471/1MNgzzb0RWg",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/08/09007492",
"title": "3D-Kernel Foveated Rendering for Light Fields",
"doi": null,
"abstractUrl": "/journal/tg/2021/08/09007492/1hJKlSvNgo8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09005240",
"title": "Eye-dominance-guided Foveated Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09005240/1hzNcOce8OQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089437",
"title": "The Effect of a Foveated Field-of-view Restrictor on VR Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089437/1jIxcfT0Wt2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a001",
"title": "Foveated Instant Radiosity",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a001/1pysxhw4Bqw",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a316",
"title": "Cloud Rendering Scheme for Standalone Virtual Reality Headset",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a316/1vg8ftWdDoY",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1iHT1kx9CdW",
"title": "2019 IEEE International Conference on Cloud Computing in Emerging Markets (CCEM)",
"acronym": "ccem",
"groupId": "1801957",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1iHT2m32LIY",
"doi": "10.1109/CCEM48484.2019.000-1",
"title": "Low-Cost Eye Tracking for Foveated Rendering Using Machine Learning",
"normalizedTitle": "Low-Cost Eye Tracking for Foveated Rendering Using Machine Learning",
"abstract": "This paper outlines a $50 head-mounted real-time eye tracker to track the user's eye, and uses foveated rendering reduce bandwidth costs and improve immersion. This is accomplished by using two cameras for detecting the iris and locating the beacons around the screen which tracks head movement in three dimensions. A neural net is trained on this data which then predicts where the user is looking based on the inputs from the two cameras. Foveated rendering is performed by only rendering the area of the screen currently being focused and blurring the area that falls under peripheral vision. Maturing this technology could be integrated into virtual reality headsets and for other immersive media experiences while dramatically decreasing bandwidth costs and increasing the overall functionality and capabilities of the devices. This would allow for a more robust as well as wireless virtual reality experience, which would mitigate the largest drawback to mass adoption: clunky wires and lack of a truly realistic experience.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper outlines a $50 head-mounted real-time eye tracker to track the user's eye, and uses foveated rendering reduce bandwidth costs and improve immersion. This is accomplished by using two cameras for detecting the iris and locating the beacons around the screen which tracks head movement in three dimensions. A neural net is trained on this data which then predicts where the user is looking based on the inputs from the two cameras. Foveated rendering is performed by only rendering the area of the screen currently being focused and blurring the area that falls under peripheral vision. Maturing this technology could be integrated into virtual reality headsets and for other immersive media experiences while dramatically decreasing bandwidth costs and increasing the overall functionality and capabilities of the devices. This would allow for a more robust as well as wireless virtual reality experience, which would mitigate the largest drawback to mass adoption: clunky wires and lack of a truly realistic experience.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper outlines a $50 head-mounted real-time eye tracker to track the user's eye, and uses foveated rendering reduce bandwidth costs and improve immersion. This is accomplished by using two cameras for detecting the iris and locating the beacons around the screen which tracks head movement in three dimensions. A neural net is trained on this data which then predicts where the user is looking based on the inputs from the two cameras. Foveated rendering is performed by only rendering the area of the screen currently being focused and blurring the area that falls under peripheral vision. Maturing this technology could be integrated into virtual reality headsets and for other immersive media experiences while dramatically decreasing bandwidth costs and increasing the overall functionality and capabilities of the devices. This would allow for a more robust as well as wireless virtual reality experience, which would mitigate the largest drawback to mass adoption: clunky wires and lack of a truly realistic experience.",
"fno": "633400a032",
"keywords": [
"Gaze Tracking",
"Helmet Mounted Displays",
"Learning Artificial Intelligence",
"Neural Nets",
"Rendering Computer Graphics",
"Virtual Reality",
"Eye Tracking",
"Machine Learning",
"Foveated Rendering",
"Cameras",
"Head Movement Tracking",
"Neural Net",
"Virtual Reality Headsets",
"Immersive Media Experiences",
"Head Mounted Real Time Eye Tracker",
"Neural Network Foveated Rendering Eye Tracking Machine Learning Iris Detection"
],
"authors": [
{
"affiliation": "Mumbai University, India",
"fullName": "Ashay Changwani",
"givenName": "Ashay",
"surname": "Changwani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mumbai University, India",
"fullName": "Tanuja Sarode",
"givenName": "Tanuja",
"surname": "Sarode",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ccem",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-09-01T00:00:00",
"pubType": "proceedings",
"pages": "32-39",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-6334-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "633400a025",
"articleId": "1iHT2CebboY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "633400a040",
"articleId": "1iHT2FrrAIg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/etvis/2016/4731/0/07851170",
"title": "An analysis of eye-tracking data in foveated ray tracing",
"doi": null,
"abstractUrl": "/proceedings-article/etvis/2016/07851170/12OmNvT2pjL",
"parentPublication": {
"id": "proceedings/etvis/2016/4731/0",
"title": "2016 IEEE Second Workshop on Eye Tracking and Visualization (ETVIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504757",
"title": "Combining eye tracking with optimizations for lens astigmatism in modern wide-angle HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504757/12OmNySG3Vp",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551511",
"title": "Eye Tracking-Based 360 Vr Foveated/Tiled Video Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551511/17D45WK5AoH",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642529",
"title": "Manufacturing Application-Driven Foveated Near-Eye Displays",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642529/17PYEjG6pn1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a756",
"title": "Rectangular Mapping-based Foveated Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a756/1CJcj9wHjH2",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a205",
"title": "Power, Performance, and Image Quality Tradeoffs in Foveated Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a205/1MNgQoZswDu",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a471",
"title": "Locomotion-aware Foveated Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a471/1MNgzzb0RWg",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/08/09007492",
"title": "3D-Kernel Foveated Rendering for Light Fields",
"doi": null,
"abstractUrl": "/journal/tg/2021/08/09007492/1hJKlSvNgo8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09005240",
"title": "Eye-dominance-guided Foveated Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09005240/1hzNcOce8OQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a398",
"title": "Rendering Optimizations for Virtual Reality Using Eye-Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a398/1oZBBw6BBa8",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBscCYv",
"title": "2016 3rd International Conference on Information Science and Control Engineering (ICISCE)",
"acronym": "icisce",
"groupId": "1807704",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAJm0qq",
"doi": "10.1109/ICISCE.2016.309",
"title": "The Study of Optical Links Establishment with Ultra-Wide FOV Acquisition Scheme in FSO Network",
"normalizedTitle": "The Study of Optical Links Establishment with Ultra-Wide FOV Acquisition Scheme in FSO Network",
"abstract": "A free space optical (FSO) network can provide broadband local access and low probability of intercept. In order to realize rapid establishment of optical links in FSO network, we proposed an ultra-wide field-of-view (FOV) acquisition scheme, which is achieved by using a fisheye lens. With it, the signals from different nodes and directions can be received. In addition, fast successive scanning is permitted to establish the optical links with an ultra-wide FOV receiver, and the scanning time model for it is established. The simulation results show that an average scanning time of 7 s and a maximum scanning speed of 30.8 deg/s are achieved with a 185° receiver FOV and this for the entire uncertainty region of +/ 8°, i.e. the 3s uncertainty region.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A free space optical (FSO) network can provide broadband local access and low probability of intercept. In order to realize rapid establishment of optical links in FSO network, we proposed an ultra-wide field-of-view (FOV) acquisition scheme, which is achieved by using a fisheye lens. With it, the signals from different nodes and directions can be received. In addition, fast successive scanning is permitted to establish the optical links with an ultra-wide FOV receiver, and the scanning time model for it is established. The simulation results show that an average scanning time of 7 s and a maximum scanning speed of 30.8 deg/s are achieved with a 185° receiver FOV and this for the entire uncertainty region of +/ 8°, i.e. the 3s uncertainty region.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A free space optical (FSO) network can provide broadband local access and low probability of intercept. In order to realize rapid establishment of optical links in FSO network, we proposed an ultra-wide field-of-view (FOV) acquisition scheme, which is achieved by using a fisheye lens. With it, the signals from different nodes and directions can be received. In addition, fast successive scanning is permitted to establish the optical links with an ultra-wide FOV receiver, and the scanning time model for it is established. The simulation results show that an average scanning time of 7 s and a maximum scanning speed of 30.8 deg/s are achieved with a 185° receiver FOV and this for the entire uncertainty region of +/ 8°, i.e. the 3s uncertainty region.",
"fno": "2535b449",
"keywords": [
"Laser Beams",
"Lenses",
"Uncertainty",
"Optical Receivers",
"Optical Fiber Networks",
"Fast Successive Scanning",
"FSO Network",
"Optical Links",
"Fisheye Lens",
"Ultra Wide Field Of View"
],
"authors": [
{
"affiliation": null,
"fullName": "Bo Tu",
"givenName": "Bo",
"surname": "Tu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bingshuang Liu",
"givenName": "Bingshuang",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shuaifu Dai",
"givenName": "Shuaifu",
"surname": "Dai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yueying Liu",
"givenName": "Yueying",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icisce",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1449-1453",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-2535-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2535b444",
"articleId": "12OmNAnMuso",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2535b454",
"articleId": "12OmNButq6a",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2013/0400/0/06572720",
"title": "Free Space Optical Communication with OOK and BPSK modulation under different turbulent condition",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572720/12OmNAmVH9C",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nana/2017/0604/0/0604a138",
"title": "Development of a Hybrid FSO/RF System during Link Misalignment",
"doi": null,
"abstractUrl": "/proceedings-article/nana/2017/0604a138/12OmNAtst6R",
"parentPublication": {
"id": "proceedings/nana/2017/0604/0",
"title": "2017 International Conference on Networking and Network Applications (NaNA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031e552",
"title": "Research on Light Polarization FSO-OFDM System",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031e552/12OmNvDqsLP",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpca/2015/8930/0/07056049",
"title": "Augmenting low-latency HPC network with free-space optical links",
"doi": null,
"abstractUrl": "/proceedings-article/hpca/2015/07056049/12OmNylboGv",
"parentPublication": {
"id": "proceedings/hpca/2015/8930/0",
"title": "2015 IEEE 21st International Symposium on High Performance Computer Architecture (HPCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726710",
"title": "Calculations of the impact on atmospheric turbulence conditions on free space optical communication links using gamma-gamma model",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726710/12OmNzcPAds",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aici/2010/4225/2/4225b459",
"title": "Field-of-View Optimization of FSO Receiver Using Real-coded Genetic Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/aici/2010/4225b459/12OmNzt0IGq",
"parentPublication": {
"id": "proceedings/aici/2010/4225/2",
"title": "Artificial Intelligence and Computational Intelligence, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wimob/2016/0724/0/07763233",
"title": "Extended measurement tests of dual polarization radio over fiber and radio over FSO fronthaul in LTE C-RAN architecture",
"doi": null,
"abstractUrl": "/proceedings-article/wimob/2016/07763233/12OmNzxyiGi",
"parentPublication": {
"id": "proceedings/wimob/2016/0724/0",
"title": "2016 IEEE 12th International Conference on Wireless and Mobile Computing, Networking and Communications (WiMob)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bwcca/2010/4236/0/05633803",
"title": "Design and Evaluation of Fiber Direct Coupling Optical Antennas for Next Generation Optical Wireless Communication Systems",
"doi": null,
"abstractUrl": "/proceedings-article/bwcca/2010/05633803/183rAg72CqZ",
"parentPublication": {
"id": "proceedings/bwcca/2010/4236/0",
"title": "2010 International Conference on Broadband, Wireless Computing, Communication and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imitec/2021/1749/0/09714573",
"title": "Assessment of Atmospheric Turbulence Effect on the Free Space Optical Communications links for Optimum Performance, over Western Cape, South Africa",
"doi": null,
"abstractUrl": "/proceedings-article/imitec/2021/09714573/1BaZNnRbSX6",
"parentPublication": {
"id": "proceedings/imitec/2021/1749/0",
"title": "2021 3rd International Multidisciplinary Information Technology and Engineering Conference (IMITEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2020/4905/0/09049690",
"title": "Fast Neighbor Discovery in MEMS FSO Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2020/09049690/1iERQrjhHLG",
"parentPublication": {
"id": "proceedings/icnc/2020/4905/0",
"title": "2020 International Conference on Computing, Networking and Communications (ICNC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC1GueH",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC2OSLm",
"doi": "",
"title": "A visual marker for precise pose estimation based on a microlens array",
"normalizedTitle": "A visual marker for precise pose estimation based on a microlens array",
"abstract": "Existing planar visual markers (AR markers) have poor accuracy in pose estimation, especially in frontal direction. We solved the problem by a novel principle using a microlens array. The marker displays a changing two-dimensional moiré pattern according to the visual-line direction. We can extract pose information from the pattern by image processing. The developed marker and the processing algorithm enable high-accuracy pose estimation even by observation from frontal direction. We verified its superiority to the conventional method by some validation tests.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Existing planar visual markers (AR markers) have poor accuracy in pose estimation, especially in frontal direction. We solved the problem by a novel principle using a microlens array. The marker displays a changing two-dimensional moiré pattern according to the visual-line direction. We can extract pose information from the pattern by image processing. The developed marker and the processing algorithm enable high-accuracy pose estimation even by observation from frontal direction. We verified its superiority to the conventional method by some validation tests.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Existing planar visual markers (AR markers) have poor accuracy in pose estimation, especially in frontal direction. We solved the problem by a novel principle using a microlens array. The marker displays a changing two-dimensional moiré pattern according to the visual-line direction. We can extract pose information from the pattern by image processing. The developed marker and the processing algorithm enable high-accuracy pose estimation even by observation from frontal direction. We verified its superiority to the conventional method by some validation tests.",
"fno": "06460264",
"keywords": [
"Augmented Reality",
"Information Retrieval",
"Microlenses",
"Pose Estimation",
"Planar Visual Marker",
"Pose Estimation",
"Frontal Direction",
"Microlens Array",
"2 D Moire Pattern",
"Visual Line Direction",
"Pose Information Extraction",
"Image Processing",
"Lenses",
"Estimation",
"Arrays",
"Visualization",
"Accuracy",
"Microoptics",
"Image Processing"
],
"authors": [
{
"affiliation": "National Institute of Advanced Industrial Science and Technology",
"fullName": "Hideyuki Tanaka",
"givenName": "Hideyuki",
"surname": "Tanaka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Institute of Advanced Industrial Science and Technology",
"fullName": "Yasushi Sumi",
"givenName": "Yasushi",
"surname": "Sumi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Institute of Advanced Industrial Science and Technology",
"fullName": "Yoshio Matsumoto",
"givenName": "Yoshio",
"surname": "Matsumoto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-11-01T00:00:00",
"pubType": "proceedings",
"pages": "837-840",
"year": "2012",
"issn": "1051-4651",
"isbn": "978-1-4673-2216-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06460262",
"articleId": "12OmNzkuKCx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06460265",
"articleId": "12OmNBOll2F",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032b481",
"title": "ChromaTag: A Colored Marker and Fast Detection Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b481/12OmNC2OSIa",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671813",
"title": "Further stabilization of a microlens-array-based fiducial marker",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671813/12OmNCfAPL4",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643605",
"title": "Floyd-Warshall all-pair shortest path for accurate multi-marker calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643605/12OmNs59JEg",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720368",
"title": "On the Design and Evaluation of a Precise Scalable Fiducial Marker Framework",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720368/12OmNwNwzQw",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a487",
"title": "The Geometry of Colorful, Lenticular Fiducial Markers",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a487/12OmNz61dwY",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/03/09773975",
"title": "DeepTag: A General Framework for Fiducial Marker Design and Detection",
"doi": null,
"abstractUrl": "/journal/tp/2023/03/09773975/1DjDnSMD9n2",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300i428",
"title": "Deep ChArUco: Dark ChArUco Marker Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300i428/1gyrHkGaO0E",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/09/09072595",
"title": "TopoTag: A Robust and Scalable Topological Fiducial Marker System",
"doi": null,
"abstractUrl": "/journal/tg/2021/09/09072595/1jbj4gWAcZa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a255",
"title": "Stencil Marker: Designing Partially Transparent Markers for Stacking Augmented Reality Objects",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a255/1pBMkhmVP7a",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100d706",
"title": "Underwater marker-based pose-estimation with associated uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100d706/1yNhtlHduJW",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNs0C9QD",
"title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots",
"acronym": "memsys",
"groupId": "1000438",
"volume": "0",
"displayVolume": "0",
"year": "1997",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqC2uVO",
"doi": "10.1109/MEMSYS.1997.581856",
"title": "Optical properties of a Si binary optic microlens for infrared ray",
"normalizedTitle": "Optical properties of a Si binary optic microlens for infrared ray",
"abstract": "We established fabrication technologies for a binary optic microlens. A Si etch rate of 1.5 /spl mu/m/min and Si/photoresist selectivity of 40 were achieved when the RIE parameters were a ratio of O/sub 2/ in the SF/sub 6/-O/sub 2/ etching gas of 0.2 at a pressure of 100 mTorr and an rf power density of 0.2 W/cm/sup 2/. We also obtained good optical properties for a micro pyroelectric infrared sensor constructed with a Si binary optic microlens. The four-phase response is 1.9 times and the eight-phase response is 2.4 times larger than the two-phase response. These results nearly agree with those of the simulation for the phase microlens diffraction efficiency ratio.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We established fabrication technologies for a binary optic microlens. A Si etch rate of 1.5 /spl mu/m/min and Si/photoresist selectivity of 40 were achieved when the RIE parameters were a ratio of O/sub 2/ in the SF/sub 6/-O/sub 2/ etching gas of 0.2 at a pressure of 100 mTorr and an rf power density of 0.2 W/cm/sup 2/. We also obtained good optical properties for a micro pyroelectric infrared sensor constructed with a Si binary optic microlens. The four-phase response is 1.9 times and the eight-phase response is 2.4 times larger than the two-phase response. These results nearly agree with those of the simulation for the phase microlens diffraction efficiency ratio.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We established fabrication technologies for a binary optic microlens. A Si etch rate of 1.5 /spl mu/m/min and Si/photoresist selectivity of 40 were achieved when the RIE parameters were a ratio of O/sub 2/ in the SF/sub 6/-O/sub 2/ etching gas of 0.2 at a pressure of 100 mTorr and an rf power density of 0.2 W/cm/sup 2/. We also obtained good optical properties for a micro pyroelectric infrared sensor constructed with a Si binary optic microlens. The four-phase response is 1.9 times and the eight-phase response is 2.4 times larger than the two-phase response. These results nearly agree with those of the simulation for the phase microlens diffraction efficiency ratio.",
"fno": "00581856",
"keywords": [
"Silicon",
"Integrated Optics",
"Lenses",
"Optical Fabrication",
"Sputter Etching",
"Microsensors",
"Infrared Detectors",
"Pyroelectric Detectors",
"Elemental Semiconductors",
"Si Binary Optic Microlens",
"Fabrication Technologies",
"Si Etch Rate",
"Si Photoresist Selectivity",
"RIE Parameters",
"SF Sub 6 O Sub 2 Etching Gas",
"Rf Power Density",
"Micro Pyroelectric Infrared Sensor",
"Four Phase Response",
"Eight Phase Response",
"Simulation",
"Phase Microlens Diffraction Efficiency Ratio",
"100 Mtorr",
"Si",
"SF Sub 6 O Sub 2",
"SF Sub 6",
"O Sub 2",
"Lenses",
"Microoptics",
"Optical Sensors",
"Etching",
"Time Factors",
"Optical Device Fabrication",
"Resists",
"Pyroelectricity",
"Infrared Sensors",
"Optical Diffraction"
],
"authors": [
{
"affiliation": "Matsusita Electron. Components Co. Ltd., Osaka, Japan",
"fullName": "K. Fujikawa",
"givenName": "K.",
"surname": "Fujikawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "G. Hirakawa",
"givenName": "G.",
"surname": "Hirakawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "T. Shiono",
"givenName": "T.",
"surname": "Shiono",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "K. Nomura",
"givenName": "K.",
"surname": "Nomura",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "memsys",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1997-01-01T00:00:00",
"pubType": "proceedings",
"pages": "360,361,362,363,364,365",
"year": "1997",
"issn": "1084-6999",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00581854",
"articleId": "12OmNyLiuAu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00581858",
"articleId": "12OmNwkzuvD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ssst/1991/2190/0/00138534",
"title": "A new application for acousto-optic cells in wavelength division demultiplexing",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1991/00138534/12OmNB9t6oJ",
"parentPublication": {
"id": "proceedings/ssst/1991/2190/0",
"title": "The Twenty-Third Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2015/7983/0/07367701",
"title": "Optic disc localization using local vessel based features and support vector machine",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2015/07367701/12OmNBv2CdE",
"parentPublication": {
"id": "proceedings/bibe/2015/7983/0",
"title": "2015 IEEE 15th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1994/5320/0/00287828",
"title": "Pulse-width modulation using an acousto-optic modulator",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1994/00287828/12OmNvA1h3T",
"parentPublication": {
"id": "proceedings/ssst/1994/5320/0",
"title": "Proceedings of 26th Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2012/1365/0/06274854",
"title": "The Inherent Temperature Compensation to Fiber-Optic Current Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2012/06274854/12OmNyxXlvT",
"parentPublication": {
"id": "proceedings/cso/2012/1365/0",
"title": "2012 Fifth International Joint Conference on Computational Sciences and Optimization (CSO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2011/1732/0/06042504",
"title": "Micro-optic technology for the microfluidics feedback",
"doi": null,
"abstractUrl": "/proceedings-article/case/2011/06042504/12OmNzwpUjK",
"parentPublication": {
"id": "proceedings/case/2011/1732/0",
"title": "2011 IEEE International Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2019/2286/0/228600a489",
"title": "Optic Disc and Cup Segmentation for Glaucoma Characterization Using Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2019/228600a489/1cdO2xs7urm",
"parentPublication": {
"id": "proceedings/cbms/2019/2286/0",
"title": "2019 IEEE 32nd International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998293",
"title": "ThinVR: Heterogeneous microlens arrays for compact, 180 degree FOV VR near-eye displays",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998293/1hrXiCmKkak",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2019/5686/0/568600a255",
"title": "A Three Phases Procedure for Optic Disc Segmentation in Retinal Images",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2019/568600a255/1j9xDJrYSIg",
"parentPublication": {
"id": "proceedings/sitis/2019/5686/0",
"title": "2019 15th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifeea/2020/9627/0/962700a134",
"title": "Recent Progress of All Fiber Optic Current Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/ifeea/2020/962700a134/1rvCwLNIFaw",
"parentPublication": {
"id": "proceedings/ifeea/2020/9627/0",
"title": "2020 7th International Forum on Electrical Engineering and Automation (IFEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b125",
"title": "Ray Tracing-Guided Design of Plenoptic Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b125/1zWEpFekVbi",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxFJXGd",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCbU2UT",
"doi": "10.1109/ICME.2017.8019294",
"title": "Improving acoustic modeling using audio-visual speech",
"normalizedTitle": "Improving acoustic modeling using audio-visual speech",
"abstract": "Reliable visual features that encode the articulator movements of speakers can dramatically improve the decoding accuracy of automatic speech recognition systems when combined with the corresponding acoustic signals. In this paper, a novel framework is proposed to utilize audio-visual speech not only during decoding but also for training better acoustic models. In this framework, a multi-stream hidden Markov model is iteratively deployed to fuse audio and video likelihoods. The fused likelihoods are used to estimate enhanced frame-state alignments, which are finally used as better training targets. The proposed framework is so flexible that it can be partially used to train acoustic models with the available audio-visual data while a conventional training strategy can be followed with the remaining acoustic data. The experimental results show that the acoustic models trained using the proposed audio-visual framework perform significantly better than those trained conventionally with solely acoustic data in clean and noisy conditions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Reliable visual features that encode the articulator movements of speakers can dramatically improve the decoding accuracy of automatic speech recognition systems when combined with the corresponding acoustic signals. In this paper, a novel framework is proposed to utilize audio-visual speech not only during decoding but also for training better acoustic models. In this framework, a multi-stream hidden Markov model is iteratively deployed to fuse audio and video likelihoods. The fused likelihoods are used to estimate enhanced frame-state alignments, which are finally used as better training targets. The proposed framework is so flexible that it can be partially used to train acoustic models with the available audio-visual data while a conventional training strategy can be followed with the remaining acoustic data. The experimental results show that the acoustic models trained using the proposed audio-visual framework perform significantly better than those trained conventionally with solely acoustic data in clean and noisy conditions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Reliable visual features that encode the articulator movements of speakers can dramatically improve the decoding accuracy of automatic speech recognition systems when combined with the corresponding acoustic signals. In this paper, a novel framework is proposed to utilize audio-visual speech not only during decoding but also for training better acoustic models. In this framework, a multi-stream hidden Markov model is iteratively deployed to fuse audio and video likelihoods. The fused likelihoods are used to estimate enhanced frame-state alignments, which are finally used as better training targets. The proposed framework is so flexible that it can be partially used to train acoustic models with the available audio-visual data while a conventional training strategy can be followed with the remaining acoustic data. The experimental results show that the acoustic models trained using the proposed audio-visual framework perform significantly better than those trained conventionally with solely acoustic data in clean and noisy conditions.",
"fno": "08019294",
"keywords": [
"Visualization",
"Acoustics",
"Hidden Markov Models",
"Training",
"Feature Extraction",
"Speech",
"Speech Recognition",
"Audio Visual Automatic Speech Recognition",
"Multi Stream HMM",
"Acoustic Modeling",
"Audio Visual Fusion",
"Noise Robustness"
],
"authors": [
{
"affiliation": "International Computer Science Institute, Berkeley, USA",
"fullName": "Ahmed Hussen Abdelaziz",
"givenName": "Ahmed Hussen",
"surname": "Abdelaziz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1081-1086",
"year": "2017",
"issn": "1945-788X",
"isbn": "978-1-5090-6067-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08019293",
"articleId": "12OmNzvz6Hl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08019295",
"articleId": "12OmNAWH9uJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2015/9953/0/07344606",
"title": "GMM-based synchronization rules for HMM-based audio-visual laughter synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344606/12OmNBJNL1i",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a154",
"title": "Listening with Your Eyes: Towards a Practical Visual Speech Recognition System Using Deep Boltzmann Machines",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a154/12OmNrkT7wo",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/2/05745027",
"title": "A coupled HMM for audio-visual speech recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745027/12OmNx8wTmZ",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/2",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/1/01326121",
"title": "A stream-weight optimization method for audio-visual speech recognition using multi-stream HMMs",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326121/12OmNxbEtH9",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/1",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/2/05745026",
"title": "Audio-visual speech modeling using coupled hidden Markov models",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745026/12OmNzA6GH8",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/2",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/2/05745029",
"title": "Multi-stream product modal audio-visual integration strategy for robust adaptive speech recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745029/12OmNzG4gta",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/2",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/3/01394568",
"title": "Multistage information fusion for audio-visual speech recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394568/12OmNzRqdJ6",
"parentPublication": {
"id": "proceedings/icme/2004/8603/3",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/08585066",
"title": "Deep Audio-Visual Speech Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/08585066/17D45VtKiwZ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600i217",
"title": "Audio-Visual Speech Codecs: Rethinking Audio-Visual Speech Enhancement by Re-Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600i217/1H1lGYeYRDW",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093307",
"title": "Audio-Visual Model Distillation Using Acoustic Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093307/1jPbtIN9OWA",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyQYtf2",
"title": "2017 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqNosaP",
"doi": "10.1109/3DV.2017.00076",
"title": "3D Room Geometry Reconstruction Using Audio-Visual Sensors",
"normalizedTitle": "3D Room Geometry Reconstruction Using Audio-Visual Sensors",
"abstract": "In this paper we propose a cuboid-based air-tight indoor room geometry estimation method using combination of audio-visual sensors. Existing vision-based 3D reconstruction methods are not applicable for scenes with transparent or reflective objects such as windows and mirrors. In this work we fuse multi-modal sensory information to overcome the limitations of purely visual reconstruction for reconstruction of complex scenes including transparent and mirror surfaces. A full scene is captured by 360Z_$^{\\circ}$_Z cameras and acoustic room impulse responses (RIRs) recorded by a loudspeaker and compact microphone array. Depth information of the scene is recovered by stereo matching from the captured images and estimation of major acoustic reflector locations from the sound. The coordinate systems for audio-visual sensors are aligned into a unified reference frame and plane elements are reconstructed from audio-visual data. Finally cuboid proxies are fitted to the planes to generate a complete room model. Experimental results show that the proposed system generates complete representations of the room structures regardless of transparent windows, featureless walls and shiny surfaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we propose a cuboid-based air-tight indoor room geometry estimation method using combination of audio-visual sensors. Existing vision-based 3D reconstruction methods are not applicable for scenes with transparent or reflective objects such as windows and mirrors. In this work we fuse multi-modal sensory information to overcome the limitations of purely visual reconstruction for reconstruction of complex scenes including transparent and mirror surfaces. A full scene is captured by 360$^{\\circ}$ cameras and acoustic room impulse responses (RIRs) recorded by a loudspeaker and compact microphone array. Depth information of the scene is recovered by stereo matching from the captured images and estimation of major acoustic reflector locations from the sound. The coordinate systems for audio-visual sensors are aligned into a unified reference frame and plane elements are reconstructed from audio-visual data. Finally cuboid proxies are fitted to the planes to generate a complete room model. Experimental results show that the proposed system generates complete representations of the room structures regardless of transparent windows, featureless walls and shiny surfaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we propose a cuboid-based air-tight indoor room geometry estimation method using combination of audio-visual sensors. Existing vision-based 3D reconstruction methods are not applicable for scenes with transparent or reflective objects such as windows and mirrors. In this work we fuse multi-modal sensory information to overcome the limitations of purely visual reconstruction for reconstruction of complex scenes including transparent and mirror surfaces. A full scene is captured by 360- cameras and acoustic room impulse responses (RIRs) recorded by a loudspeaker and compact microphone array. Depth information of the scene is recovered by stereo matching from the captured images and estimation of major acoustic reflector locations from the sound. The coordinate systems for audio-visual sensors are aligned into a unified reference frame and plane elements are reconstructed from audio-visual data. Finally cuboid proxies are fitted to the planes to generate a complete room model. Experimental results show that the proposed system generates complete representations of the room structures regardless of transparent windows, featureless walls and shiny surfaces.",
"fno": "261001a621",
"keywords": [
"Acoustic Signal Processing",
"Architectural Acoustics",
"Cameras",
"Computer Vision",
"Geometry",
"Image Matching",
"Image Reconstruction",
"Image Sensors",
"Loudspeakers",
"Microphone Arrays",
"Robot Vision",
"Stereo Image Processing",
"Transient Response",
"3 D Room Geometry Reconstruction",
"Audio Visual Sensors",
"Cuboid Based Air Tight Indoor Room Geometry Estimation Method",
"Transparent Objects",
"Reflective Objects",
"Multimodal Sensory Information",
"Purely Visual Reconstruction",
"Complex Scenes",
"Transparent Mirror Surfaces",
"Acoustic Room Impulse Responses",
"Audio Visual Data",
"Complete Room Model",
"Room Structures",
"Cameras",
"Acoustics",
"Microphones",
"Sensors",
"Three Dimensional Displays",
"Geometry",
"Image Reconstruction",
"Indoor Room Geometry Estimation",
"Audio Visual Processing",
"3 D Reconstruction"
],
"authors": [
{
"affiliation": null,
"fullName": "Hansung Kim",
"givenName": "Hansung",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Luca Remaggi",
"givenName": "Luca",
"surname": "Remaggi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Philip JB Jackson",
"givenName": "Philip JB",
"surname": "Jackson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Filippo Maria Fazi",
"givenName": "Filippo Maria",
"surname": "Fazi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Adrian Hilton",
"givenName": "Adrian",
"surname": "Hilton",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "621-629",
"year": "2017",
"issn": "2475-7888",
"isbn": "978-1-5386-2610-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "261001a612",
"articleId": "12OmNwDj1gn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "261001a630",
"articleId": "12OmNxwncfo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isspit/2014/1812/0/07300636",
"title": "Active Speaker Detection using audio-visual sensor array",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2014/07300636/12OmNAS9zqy",
"parentPublication": {
"id": "proceedings/isspit/2014/1812/0",
"title": "2014 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dexa/2010/8049/0/05590754",
"title": "Gaze-tracking and Acoustic Vector Sensors Technologies for PTZ Camera Steering and Acoustic Event Detection",
"doi": null,
"abstractUrl": "/proceedings-article/dexa/2010/05590754/12OmNwMFMgm",
"parentPublication": {
"id": "proceedings/dexa/2010/8049/0",
"title": "2010 Workshops on Database and Expert Systems Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2017/0733/0/0733b907",
"title": "Spotting Audio-Visual Inconsistencies (SAVI) in Manipulated Video",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733b907/12OmNx5piVV",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a519",
"title": "Room Layout Estimation with Object and Material Attributes Information Using a Spherical Camera",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a519/12OmNyv7meo",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/08/07970123",
"title": "3D Reconstruction in the Presence of Glass and Mirrors by Acoustic and Visual Fusion",
"doi": null,
"abstractUrl": "/journal/tp/2018/08/07970123/13rRUwcAqrs",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c051",
"title": "LayoutNet: Reconstructing the 3D Room Layout from a Single RGB Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c051/17D45W9KVIS",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200b163",
"title": "Audio-Visual Floorplan Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200b163/1BmIugs4u1G",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8836",
"title": "Visual Acoustic Matching",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8836/1H0LBaUp66Y",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798247",
"title": "Immersive Spatial Audio Reproduction for VR/AR Using Room Acoustic Modelling from 360° Images",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798247/1cJ1gHhXwha",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093307",
"title": "Audio-Visual Model Distillation Using Acoustic Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093307/1jPbtIN9OWA",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H0LBaUp66Y",
"doi": "10.1109/CVPR52688.2022.01829",
"title": "Visual Acoustic Matching",
"normalizedTitle": "Visual Acoustic Matching",
"abstract": "We introduce the visual acoustic matching task, in which an audio clip is transformed to sound like it was recorded in a target environment. Given an image of the target environment and a waveform for the source audio, the goal is to re-synthesize the audio to match the target room acoustics as suggested by its visible geometry and materials. To address this novel task, we propose a cross-modal transformer model that uses audio-visual attention to inject visual properties into the audio and generate realistic audio output. In addition, we devise a self-supervised training objective that can learn acoustic matching from in-the-wild Web videos, despite their lack of acoustically mismatched audio. We demonstrate that our approach successfully translates human speech to a variety of real-world environments depicted in images, outperforming both traditional acoustic matching and more heavily supervised baselines.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce the visual acoustic matching task, in which an audio clip is transformed to sound like it was recorded in a target environment. Given an image of the target environment and a waveform for the source audio, the goal is to re-synthesize the audio to match the target room acoustics as suggested by its visible geometry and materials. To address this novel task, we propose a cross-modal transformer model that uses audio-visual attention to inject visual properties into the audio and generate realistic audio output. In addition, we devise a self-supervised training objective that can learn acoustic matching from in-the-wild Web videos, despite their lack of acoustically mismatched audio. We demonstrate that our approach successfully translates human speech to a variety of real-world environments depicted in images, outperforming both traditional acoustic matching and more heavily supervised baselines.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce the visual acoustic matching task, in which an audio clip is transformed to sound like it was recorded in a target environment. Given an image of the target environment and a waveform for the source audio, the goal is to re-synthesize the audio to match the target room acoustics as suggested by its visible geometry and materials. To address this novel task, we propose a cross-modal transformer model that uses audio-visual attention to inject visual properties into the audio and generate realistic audio output. In addition, we devise a self-supervised training objective that can learn acoustic matching from in-the-wild Web videos, despite their lack of acoustically mismatched audio. We demonstrate that our approach successfully translates human speech to a variety of real-world environments depicted in images, outperforming both traditional acoustic matching and more heavily supervised baselines.",
"fno": "694600s8836",
"keywords": [
"Architectural Acoustics",
"Audio Signal Processing",
"Audio Visual Systems",
"Image Classification",
"Internet",
"Learning Artificial Intelligence",
"Video Signal Processing",
"Real World Environments",
"Traditional Acoustic Matching",
"Visual Acoustic Matching Task",
"Audio Clip",
"Target Environment",
"Source Audio",
"Target Room Acoustics",
"Visible Geometry",
"Cross Modal Transformer Model",
"Audio Visual Attention",
"Visual Properties",
"Realistic Audio Output",
"Self Supervised Training Objective",
"Acoustically Mismatched Audio",
"Training",
"Geometry",
"Visualization",
"Computer Vision",
"Computational Modeling",
"Transformers",
"Acoustics"
],
"authors": [
{
"affiliation": "University of Texas at Austin",
"fullName": "Changan Chen",
"givenName": "Changan",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Stanford University",
"fullName": "Ruohan Gao",
"givenName": "Ruohan",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Reality Labs Research at Meta",
"fullName": "Paul Calamia",
"givenName": "Paul",
"surname": "Calamia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Texas at Austin",
"fullName": "Kristen Grauman",
"givenName": "Kristen",
"surname": "Grauman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "18836-18846",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H0LB7w4kJa",
"name": "pcvpr202269460-09879986s1-mm_694600s8836.zip",
"size": "6.31 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879986s1-mm_694600s8836.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600s8825",
"articleId": "1H1i6NZg9xK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600s8847",
"articleId": "1H0LxkY0vXW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2015/9953/0/07344606",
"title": "GMM-based synchronization rules for HMM-based audio-visual laughter synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344606/12OmNBJNL1i",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019294",
"title": "Improving acoustic modeling using audio-visual speech",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019294/12OmNCbU2UT",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a621",
"title": "3D Room Geometry Reconstruction Using Audio-Visual Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a621/12OmNqNosaP",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2015/7343/0/07314306",
"title": "Visual Fingerprints of the Acoustic Environment: The Use of Acoustic Indices to Characterise Natural Habitats",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2015/07314306/12OmNrHjqJv",
"parentPublication": {
"id": "proceedings/bdva/2015/7343/0",
"title": "2015 Big Data Visual Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci*cc/2017/0771/0/08109785",
"title": "Towards multimodal saliency detection: An enhancement of audio-visual correlation estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2017/08109785/12OmNvUaNkH",
"parentPublication": {
"id": "proceedings/icci*cc/2017/0771/0",
"title": "2017 IEEE 16th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2018/9156/0/08643451",
"title": "Visualizing five decades of environmental acoustic data",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2018/08643451/17QjJeBrIZk",
"parentPublication": {
"id": "proceedings/e-science/2018/9156/0",
"title": "2018 IEEE 14th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798247",
"title": "Immersive Spatial Audio Reproduction for VR/AR Using Room Acoustic Modelling from 360° Images",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798247/1cJ1gHhXwha",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300g291",
"title": "Dual Attention Matching for Audio-Visual Event Localization",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300g291/1hQqiECKdCE",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093307",
"title": "Audio-Visual Model Distillation Using Acoustic Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093307/1jPbtIN9OWA",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900p5511",
"title": "Semantic Audio-Visual Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900p5511/1yeJKiErs0E",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxzQzr0EU",
"doi": "10.1109/VRW50115.2020.00119",
"title": "Scene-aware Sound Rendering in Virtual and Real Worlds",
"normalizedTitle": "Scene-aware Sound Rendering in Virtual and Real Worlds",
"abstract": "Modern computer graphics applications including virtual reality (VR) and augmented reality (AR) have adopted techniques for both visual rendering and audio rendering. While visual rendering can already synthesize virtual objects into the real world seamlessly, it remains difficult to correctly blend virtual sound with real-world sound using state-of-the-art audio rendering. When the virtual sound is generated unaware of the scene, the corresponding application becomes less immersive, especially for AR. In this position paper, we present our current work on generating scene-aware sound using ray-tracing based simulation combined with deep learning and optimization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Modern computer graphics applications including virtual reality (VR) and augmented reality (AR) have adopted techniques for both visual rendering and audio rendering. While visual rendering can already synthesize virtual objects into the real world seamlessly, it remains difficult to correctly blend virtual sound with real-world sound using state-of-the-art audio rendering. When the virtual sound is generated unaware of the scene, the corresponding application becomes less immersive, especially for AR. In this position paper, we present our current work on generating scene-aware sound using ray-tracing based simulation combined with deep learning and optimization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Modern computer graphics applications including virtual reality (VR) and augmented reality (AR) have adopted techniques for both visual rendering and audio rendering. While visual rendering can already synthesize virtual objects into the real world seamlessly, it remains difficult to correctly blend virtual sound with real-world sound using state-of-the-art audio rendering. When the virtual sound is generated unaware of the scene, the corresponding application becomes less immersive, especially for AR. In this position paper, we present our current work on generating scene-aware sound using ray-tracing based simulation combined with deep learning and optimization.",
"fno": "09090553",
"keywords": [
"Rendering Computer Graphics",
"Acoustics",
"Visualization",
"Geometry",
"Acoustic Measurements",
"Solid Modeling",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Mixed Augmented Reality",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Virtual Reality"
],
"authors": [
{
"affiliation": "University of Maryland",
"fullName": "Zhenyu Tang",
"givenName": "Zhenyu",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Maryland",
"fullName": "Dinesh Manocha",
"givenName": "Dinesh",
"surname": "Manocha",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "535-536",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090475",
"articleId": "1jIxjhubDfq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090482",
"articleId": "1jIxrm4VUxG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2004/8603/1/01394276",
"title": "Progressive sound rendering in multimedia applications",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394276/12OmNBghttT",
"parentPublication": {
"id": "proceedings/icme/2004/8603/1",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/3/01394611",
"title": "Distributed sound rendering for interactive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394611/12OmNBmf3ae",
"parentPublication": {
"id": "proceedings/icme/2004/8603/3",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802057",
"title": "Design and evaluation of Binaural auditory rendering for CAVEs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802057/12OmNCctfc5",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2006/9753/0/04042282",
"title": "Spatial Sound Rendering Using Measured Room Impulse Responses",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2006/04042282/12OmNy3iFfx",
"parentPublication": {
"id": "proceedings/isspit/2006/9753/0",
"title": "2006 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/v1672",
"title": "Interactive sound rendering in complex and dynamic scenes using frustum tracing",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/v1672/13rRUxNEqPJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577131",
"title": "Binaural Rendering for Sound Navigation and Orientation",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577131/17D45XoXP3v",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a226",
"title": "Lightweight Scene-aware Rain Sound Simulation for Interactive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a226/1MNgVbw2hc4",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798177",
"title": "Perceptual Study of Near-Field Binaural Audio Rendering in Six-Degrees-of-Freedom Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798177/1cJ13xpYvE4",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998301",
"title": "Scene-Aware Audio Rendering via Deep Acoustic Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998301/1hpPBqG2djy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a487",
"title": "Vision-based Acoustic Information Retrieval for Interactive Sound Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a487/1yfxLit9Jw4",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1jPbbHBGDHq",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jPbtIN9OWA",
"doi": "10.1109/WACV45572.2020.9093307",
"title": "Audio-Visual Model Distillation Using Acoustic Images",
"normalizedTitle": "Audio-Visual Model Distillation Using Acoustic Images",
"abstract": "In this paper, we investigate how to learn rich and robust feature representations for audio classification from visual data and acoustic images, a novel audio data modality. Former models learn audio representations from raw signals or spectral data acquired by a single microphone, with remarkable results in classification and retrieval. However, such representations are not so robust towards variable environmental sound conditions. We tackle this drawback by exploiting a new multimodal labeled action recognition dataset acquired by a hybrid audio-visual sensor that provides RGB video, raw audio signals, and spatialized acoustic data, also known as acoustic images, where the visual and acoustic images are aligned in space and synchronized in time. Using this richer information, we train audio deep learning models in a teacher-student fashion. In particular, we distill knowledge into audio networks from both visual and acoustic image teachers. Our experiments suggest that the learned representations are more powerful and have better generalization capabilities than the features learned from models trained using just single-microphone audio data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we investigate how to learn rich and robust feature representations for audio classification from visual data and acoustic images, a novel audio data modality. Former models learn audio representations from raw signals or spectral data acquired by a single microphone, with remarkable results in classification and retrieval. However, such representations are not so robust towards variable environmental sound conditions. We tackle this drawback by exploiting a new multimodal labeled action recognition dataset acquired by a hybrid audio-visual sensor that provides RGB video, raw audio signals, and spatialized acoustic data, also known as acoustic images, where the visual and acoustic images are aligned in space and synchronized in time. Using this richer information, we train audio deep learning models in a teacher-student fashion. In particular, we distill knowledge into audio networks from both visual and acoustic image teachers. Our experiments suggest that the learned representations are more powerful and have better generalization capabilities than the features learned from models trained using just single-microphone audio data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we investigate how to learn rich and robust feature representations for audio classification from visual data and acoustic images, a novel audio data modality. Former models learn audio representations from raw signals or spectral data acquired by a single microphone, with remarkable results in classification and retrieval. However, such representations are not so robust towards variable environmental sound conditions. We tackle this drawback by exploiting a new multimodal labeled action recognition dataset acquired by a hybrid audio-visual sensor that provides RGB video, raw audio signals, and spatialized acoustic data, also known as acoustic images, where the visual and acoustic images are aligned in space and synchronized in time. Using this richer information, we train audio deep learning models in a teacher-student fashion. In particular, we distill knowledge into audio networks from both visual and acoustic image teachers. Our experiments suggest that the learned representations are more powerful and have better generalization capabilities than the features learned from models trained using just single-microphone audio data.",
"fno": "09093307",
"keywords": [
"Acoustic Signal Processing",
"Convolutional Neural Nets",
"Feature Extraction",
"Image Recognition",
"Image Representation",
"Learning Artificial Intelligence",
"Microphones",
"Audio Classification",
"Visual Data",
"Acoustic Images",
"Audio Representations",
"Spectral Data",
"Variable Environmental Sound Conditions",
"Multimodal Labeled Action Recognition Dataset",
"Hybrid Audio Visual Sensor",
"Acoustic Data",
"Visual Images",
"Audio Deep Learning Models",
"Audio Networks",
"Visual Image Teachers",
"Acoustic Image Teachers",
"Learned Representations",
"Single Microphone Audio Data",
"Audio Visual Model Distillation",
"Feature Representations",
"Audio Data Modality",
"Acoustics",
"Visualization",
"Data Models",
"Training",
"Microphones",
"Machine Learning",
"Synchronization"
],
"authors": [
{
"affiliation": "Istituto Italiano di Tecnologia,Pattern Analysis & Computer Vision",
"fullName": "Andrés F. Pérez",
"givenName": "Andrés F.",
"surname": "Pérez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Istituto Italiano di Tecnologia,Pattern Analysis & Computer Vision",
"fullName": "Valentina Sanguineti",
"givenName": "Valentina",
"surname": "Sanguineti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Istituto Italiano di Tecnologia,Pattern Analysis & Computer Vision",
"fullName": "Pietro Morerio",
"givenName": "Pietro",
"surname": "Morerio",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Istituto Italiano di Tecnologia,Pattern Analysis & Computer Vision",
"fullName": "Vittorio Murino",
"givenName": "Vittorio",
"surname": "Murino",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "2843-2852",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6553-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09093441",
"articleId": "1jPbnO7XlFC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09093386",
"articleId": "1jPbzspqdlS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2015/9953/0/07344606",
"title": "GMM-based synchronization rules for HMM-based audio-visual laughter synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344606/12OmNBJNL1i",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019294",
"title": "Improving acoustic modeling using audio-visual speech",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019294/12OmNCbU2UT",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a621",
"title": "3D Room Geometry Reconstruction Using Audio-Visual Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a621/12OmNqNosaP",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/host/2018/4731/0/08383887",
"title": "Zero-permission acoustic cross-device tracking",
"doi": null,
"abstractUrl": "/proceedings-article/host/2018/08383887/12OmNz2C1xm",
"parentPublication": {
"id": "proceedings/host/2018/4731/0",
"title": "2018 IEEE International Symposium on Hardware Oriented Security and Trust (HOST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2019/03/08370653",
"title": "Dolphin: Real-Time Hidden Acoustic Signal Capture with Smartphones",
"doi": null,
"abstractUrl": "/journal/tm/2019/03/08370653/17D45We0UEC",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8836",
"title": "Visual Acoustic Matching",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8836/1H0LBaUp66Y",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798247",
"title": "Immersive Spatial Audio Reproduction for VR/AR Using Room Acoustic Modelling from 360° Images",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798247/1cJ1gHhXwha",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300g291",
"title": "Dual Attention Matching for Audio-Visual Event Localization",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300g291/1hQqiECKdCE",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428081",
"title": "Multimodal Transformer Networks with Latent Interaction for Audio-Visual Event Localization",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428081/1uilRgsbUFa",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900p5511",
"title": "Semantic Audio-Visual Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900p5511/1yeJKiErs0E",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyoiYVr",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy1SFMf",
"doi": "10.1109/CVPR.2017.255",
"title": "Deep outdoor illumination estimation",
"normalizedTitle": "Deep outdoor illumination estimation",
"abstract": "We present a CNN-based technique to estimate high-dynamic range outdoor illumination from a single low dynamic range image. To train the CNN, we leverage a large dataset of outdoor panoramas. We fit a low-dimensional physically-based outdoor illumination model to the skies in these panoramas giving us a compact set of parameters (including sun position, atmospheric conditions, and camera parameters). We extract limited field-of-view images from the panoramas, and train a CNN with this large set of input image-output lighting parameter pairs. Given a test image, this network can be used to infer illumination parameters that can, in turn, be used to reconstruct an outdoor illumination environment map. We demonstrate that our approach allows the recovery of plausible illumination conditions and enables photorealistic virtual object insertion from a single image. An extensive evaluation on both the panorama dataset and captured HDR environment maps shows that our technique significantly outperforms previous solutions to this problem.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a CNN-based technique to estimate high-dynamic range outdoor illumination from a single low dynamic range image. To train the CNN, we leverage a large dataset of outdoor panoramas. We fit a low-dimensional physically-based outdoor illumination model to the skies in these panoramas giving us a compact set of parameters (including sun position, atmospheric conditions, and camera parameters). We extract limited field-of-view images from the panoramas, and train a CNN with this large set of input image-output lighting parameter pairs. Given a test image, this network can be used to infer illumination parameters that can, in turn, be used to reconstruct an outdoor illumination environment map. We demonstrate that our approach allows the recovery of plausible illumination conditions and enables photorealistic virtual object insertion from a single image. An extensive evaluation on both the panorama dataset and captured HDR environment maps shows that our technique significantly outperforms previous solutions to this problem.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a CNN-based technique to estimate high-dynamic range outdoor illumination from a single low dynamic range image. To train the CNN, we leverage a large dataset of outdoor panoramas. We fit a low-dimensional physically-based outdoor illumination model to the skies in these panoramas giving us a compact set of parameters (including sun position, atmospheric conditions, and camera parameters). We extract limited field-of-view images from the panoramas, and train a CNN with this large set of input image-output lighting parameter pairs. Given a test image, this network can be used to infer illumination parameters that can, in turn, be used to reconstruct an outdoor illumination environment map. We demonstrate that our approach allows the recovery of plausible illumination conditions and enables photorealistic virtual object insertion from a single image. An extensive evaluation on both the panorama dataset and captured HDR environment maps shows that our technique significantly outperforms previous solutions to this problem.",
"fno": "0457c373",
"keywords": [
"Cellular Neural Nets",
"Image Processing",
"Deep Outdoor Illumination Estimation",
"CNN",
"High Dynamic Range Outdoor Illumination",
"Single Low Dynamic Range Image",
"Outdoor Panoramas",
"Atmospheric Conditions",
"Camera Parameters",
"Field Of View Images",
"Input Image Output Lighting Parameter Pairs",
"Illumination Parameters",
"Outdoor Illumination Environment Map",
"Plausible Illumination Conditions",
"Sun Position",
"Lighting",
"Sun",
"Atmospheric Modeling",
"Cameras",
"Dynamic Range",
"Estimation"
],
"authors": [
{
"affiliation": null,
"fullName": "Yannick Hold-Geoffroy",
"givenName": "Yannick",
"surname": "Hold-Geoffroy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kalyan Sunkavalli",
"givenName": "Kalyan",
"surname": "Sunkavalli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sunil Hadap",
"givenName": "Sunil",
"surname": "Hadap",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Emiliano Gambaretto",
"givenName": "Emiliano",
"surname": "Gambaretto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jean-François Lalonde",
"givenName": "Jean-François",
"surname": "Lalonde",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "2373-2382",
"year": "2017",
"issn": "1063-6919",
"isbn": "978-1-5386-0457-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0457c363",
"articleId": "12OmNAoUTip",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0457c383",
"articleId": "12OmNyeECEu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2014/7000/1/7000a131",
"title": "Lighting Estimation in Outdoor Image Collections",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a131/12OmNBdJ5j1",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2015/8667/0/07168379",
"title": "What Is a Good Day for Outdoor Photometric Stereo?",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2015/07168379/12OmNBmf3by",
"parentPublication": {
"id": "proceedings/iccp/2015/8667/0",
"title": "2015 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a028",
"title": "x-Hour Outdoor Photometric Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a028/12OmNCesr5K",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459163",
"title": "Estimating natural illumination from a single outdoor image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459163/12OmNrJiD1c",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032e529",
"title": "Learning High Dynamic Range from Outdoor Panoramas",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032e529/12OmNwHz09M",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200p5293",
"title": "Hierarchical Disentangled Representation Learning for Outdoor Illumination Estimation and Editing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5293/1BmEEvuyjUQ",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09887904",
"title": "Fast and Accurate Illumination Estimation Using LDR Panoramic Images for Realistic Rendering",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09887904/1GBRnHyZ1bW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/06/08943966",
"title": "Single Day Outdoor Photometric Stereo",
"doi": null,
"abstractUrl": "/journal/tp/2021/06/08943966/1g6v5tld7IQ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300g920",
"title": "Deep Sky Modeling for Single Image Outdoor Lighting Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300g920/1gyrdbEY2sM",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0150",
"title": "All-Weather Deep Outdoor Lighting Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0150/1gyrg6Ricuc",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxuGbpWa4",
"doi": "10.1109/VRW50115.2020.00202",
"title": "Real-time Illumination Estimation for Mixed Reality on Mobile Devices",
"normalizedTitle": "Real-time Illumination Estimation for Mixed Reality on Mobile Devices",
"abstract": "We present a lightweight lighting estimation method for the purpose of realistic mixed reality (MR) on mobile devices. Given a single RGB image, our method estimates the environment lighting and renders the virtual object in real-time. While previous works tackled this problem by reconstructing the complicated high dynamic range (HDR) environment maps, our deep neural network directly infers the corresponding spherical harmonics in extensive environments. Compared to previous approaches, our method is more robust and efficient as it works for both indoor and outdoor scenes in real-time. Experiments show that our approach achieves realistic rendering in various MR scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a lightweight lighting estimation method for the purpose of realistic mixed reality (MR) on mobile devices. Given a single RGB image, our method estimates the environment lighting and renders the virtual object in real-time. While previous works tackled this problem by reconstructing the complicated high dynamic range (HDR) environment maps, our deep neural network directly infers the corresponding spherical harmonics in extensive environments. Compared to previous approaches, our method is more robust and efficient as it works for both indoor and outdoor scenes in real-time. Experiments show that our approach achieves realistic rendering in various MR scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a lightweight lighting estimation method for the purpose of realistic mixed reality (MR) on mobile devices. Given a single RGB image, our method estimates the environment lighting and renders the virtual object in real-time. While previous works tackled this problem by reconstructing the complicated high dynamic range (HDR) environment maps, our deep neural network directly infers the corresponding spherical harmonics in extensive environments. Compared to previous approaches, our method is more robust and efficient as it works for both indoor and outdoor scenes in real-time. Experiments show that our approach achieves realistic rendering in various MR scenarios.",
"fno": "09090568",
"keywords": [
"Lighting",
"Virtual Reality",
"Real Time Systems",
"Mobile Handsets",
"Estimation",
"Robustness",
"Image Reconstruction",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interface",
"Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "Northwestern Polytechnical University,National Engineering Laboratory for Integrated Aero-Space-Ground-Ocean Big Data Application Technology, School of Computer Science,China",
"fullName": "Di Xu",
"givenName": "Di",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University,National Engineering Laboratory for Integrated Aero-Space-Ground-Ocean Big Data Application Technology, School of Computer Science,China",
"fullName": "Zhen Li",
"givenName": "Zhen",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University,National Engineering Laboratory for Integrated Aero-Space-Ground-Ocean Big Data Application Technology, School of Computer Science,China",
"fullName": "Yanning Zhang",
"givenName": "Yanning",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "702-703",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090637",
"articleId": "1jIxxaGSFRm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090561",
"articleId": "1jIxzrl7CPm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2016/3641/0/3641a037",
"title": "A Single Camera Image Based Approach for Glossy Reflections in Mixed Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a037/12OmNrJAdMm",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2017/2943/0/2943a082",
"title": "Learning Lightprobes for Mixed Reality Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a082/12OmNwGqBn3",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a192",
"title": "[POSTER] Illumination Estimation Using Cast Shadows for Realistic Augmented Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a192/12OmNxX3uLh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802055",
"title": "Global illumination for Augmented Reality on mobile phones",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802055/12OmNyRg4FC",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/04/07829404",
"title": "MR360: Mixed Reality Rendering for 360° Panoramic Videos",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07829404/13rRUwhHcQW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2018/7315/0/731500a138",
"title": "Glossy Reflections for Mixed Reality Environments on Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2018/731500a138/17D45Wda7hc",
"parentPublication": {
"id": "proceedings/cw/2018/7315/0",
"title": "2018 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699293",
"title": "Probeless and Realistic Mixed Reality Application in Presence of Dynamic Light Sources",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699293/19F1LW7sJEc",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09887904",
"title": "Fast and Accurate Illumination Estimation Using LDR Panoramic Images for Realistic Rendering",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09887904/1GBRnHyZ1bW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300f911",
"title": "DeepLight: Learning Illumination for Unconstrained Mobile Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300f911/1gys1sM88yA",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998303",
"title": "Physically-inspired Deep Light Estimation from a Homogeneous-Material Object for Mixed Reality Lighting",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998303/1hrXfo1lGb6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwwMf3H",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismarw",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAoDhRV",
"doi": "10.1109/ISMAR-Adjunct.2016.0081",
"title": "PoLAR: A Portable Library for Augmented Reality",
"normalizedTitle": "PoLAR: A Portable Library for Augmented Reality",
"abstract": "We present here a novel cross-platform library to facilitate research and development applications dealing with augmented reality (AR). Features include 2D and 3D objects visualization and interaction, camera flow and image manipulation, and soft-body deformation. Our aim is to provide computer vision specialists' with tools to facilitate AR application development by providing easy and state of the art access to GUI creation, visualization and hardware management.We demonstrate both the simplicity and the efficiency of coding AR applications through three detailed examples. PoLAR can be downloaded at http://polar.inria.fr and is distributed under the GPL licence.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present here a novel cross-platform library to facilitate research and development applications dealing with augmented reality (AR). Features include 2D and 3D objects visualization and interaction, camera flow and image manipulation, and soft-body deformation. Our aim is to provide computer vision specialists' with tools to facilitate AR application development by providing easy and state of the art access to GUI creation, visualization and hardware management.We demonstrate both the simplicity and the efficiency of coding AR applications through three detailed examples. PoLAR can be downloaded at http://polar.inria.fr and is distributed under the GPL licence.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present here a novel cross-platform library to facilitate research and development applications dealing with augmented reality (AR). Features include 2D and 3D objects visualization and interaction, camera flow and image manipulation, and soft-body deformation. Our aim is to provide computer vision specialists' with tools to facilitate AR application development by providing easy and state of the art access to GUI creation, visualization and hardware management.We demonstrate both the simplicity and the efficiency of coding AR applications through three detailed examples. PoLAR can be downloaded at http://polar.inria.fr and is distributed under the GPL licence.",
"fno": "07836502",
"keywords": [
"Augmented Reality",
"Data Visualisation",
"Digital Libraries",
"Graphical User Interfaces",
"Image Processing",
"Research And Development",
"Software Tools",
"Solid Modelling",
"3 D Models",
"Hardware Management",
"GUI Creation",
"AR Application Development Tools",
"Computer Vision Specialists",
"Soft Body Deformation",
"Image Manipulation",
"Camera Flow",
"Objects Interaction",
"3 D Objects Visualization",
"2 D Objects Visualization",
"Research And Development Applications",
"Cross Platform Library",
"Augmented Reality",
"Portable Library",
"Po LAR",
"Three Dimensional Displays",
"Libraries",
"Cameras",
"Two Dimensional Displays",
"Engines",
"Augmented Reality",
"Graphical User Interfaces",
"1 3 4 Computer Graphics Graphics Utilities Application Packages I 4 9 Image Processing And Computer Vision Applications I 6 8 Simulation And Modeling Types Of Simulation Animation"
],
"authors": [
{
"affiliation": null,
"fullName": "Pierre-Jean Petitprez",
"givenName": "Pierre-Jean",
"surname": "Petitprez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Erwan Kerrien",
"givenName": "Erwan",
"surname": "Kerrien",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Pierre-Frederic Villard",
"givenName": "Pierre-Frederic",
"surname": "Villard",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismarw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-09-01T00:00:00",
"pubType": "proceedings",
"pages": "227-230",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3740-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07836501",
"articleId": "12OmNBpmDJC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07836503",
"articleId": "12OmNz6iOJa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a120",
"title": "[POSTER] Design Guidelines for Generating Augmented Reality Instructions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a120/12OmNAle6zC",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836446",
"title": "Mobile Guide to Augmented Reality for Campus of the Autonomous University of Nayarit",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836446/12OmNAmmuP2",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2018/6049/0/604901a372",
"title": "GeoSolvAR: Augmented Reality Based Solution for Visualizing 3D Solids",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2018/604901a372/12OmNwMobbg",
"parentPublication": {
"id": "proceedings/icalt/2018/6049/0",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/08/08611113",
"title": "MARVisT: Authoring Glyph-Based Visualization in Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/08/08611113/17D45Wuc367",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798358",
"title": "In-Situ Labeling for Augmented Reality Language Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798358/1cJ0VFN6eIw",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2020/03/08978600",
"title": "Virtual and Augmented Reality Applications to Support Data Analysis and Assessment of Science and Engineering",
"doi": null,
"abstractUrl": "/magazine/cs/2020/03/08978600/1haUwHHeDew",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090420",
"title": "A Usability Assessment Of Augmented Situated Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090420/1jIxvndUVYA",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09223669",
"title": "Personal Augmented Reality for Information Visualization on Large Interactive Displays",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09223669/1nV6cy8Xk5i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a203",
"title": "Industrial Augmented Reality: 3D-Content Editor for Augmented Reality Maintenance Worker Support System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a203/1pBMigKK7F6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a454",
"title": "Augmented Reality with Maps for Off-Screen POI Awareness",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a454/1rSR7Fgh4qc",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.