data
dict |
|---|
{
"proceeding": {
"id": "12OmNrAdsuf",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAfPIP9",
"doi": "10.1109/ISMAR.2015.31",
"title": "[POSTER] RGB-D/C-arm Calibration and Application in Medical Augmented Reality",
"normalizedTitle": "[POSTER] RGB-D/C-arm Calibration and Application in Medical Augmented Reality",
"abstract": "Calibration and registration are the first steps for augmented reality and mixed reality applications. In the medical field, the calibration between an RGB-D camera and a mobile C-arm fluoroscope is a new topic which introduces challenges. In this paper, we propose a precise 3D/2D calibration method to achieve a video augmented fluoroscope. With the design of a suitable calibration phantom for RGB-D/C-arm calibration, we calculate the projection matrix from the depth camera coordinates to the X-ray image. Through a comparison experiment by combining different steps leading to the calibration, we evaluate the effect of every step of our calibration process. Results demonstrated that we obtain a calibration RMS error of 0.54±1.40 mm which is promising for surgical applications. We conclude this paper by showcasing two clinical applications. One is a markerless registration application, the other is an RGB-D camera augmented mobile C-arm visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Calibration and registration are the first steps for augmented reality and mixed reality applications. In the medical field, the calibration between an RGB-D camera and a mobile C-arm fluoroscope is a new topic which introduces challenges. In this paper, we propose a precise 3D/2D calibration method to achieve a video augmented fluoroscope. With the design of a suitable calibration phantom for RGB-D/C-arm calibration, we calculate the projection matrix from the depth camera coordinates to the X-ray image. Through a comparison experiment by combining different steps leading to the calibration, we evaluate the effect of every step of our calibration process. Results demonstrated that we obtain a calibration RMS error of 0.54±1.40 mm which is promising for surgical applications. We conclude this paper by showcasing two clinical applications. One is a markerless registration application, the other is an RGB-D camera augmented mobile C-arm visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Calibration and registration are the first steps for augmented reality and mixed reality applications. In the medical field, the calibration between an RGB-D camera and a mobile C-arm fluoroscope is a new topic which introduces challenges. In this paper, we propose a precise 3D/2D calibration method to achieve a video augmented fluoroscope. With the design of a suitable calibration phantom for RGB-D/C-arm calibration, we calculate the projection matrix from the depth camera coordinates to the X-ray image. Through a comparison experiment by combining different steps leading to the calibration, we evaluate the effect of every step of our calibration process. Results demonstrated that we obtain a calibration RMS error of 0.54±1.40 mm which is promising for surgical applications. We conclude this paper by showcasing two clinical applications. One is a markerless registration application, the other is an RGB-D camera augmented mobile C-arm visualization.",
"fno": "7660a100",
"keywords": [
"X Ray Imaging",
"Calibration",
"Three Dimensional Displays",
"Cameras",
"Sensors",
"Biomedical Imaging",
"Distortion"
],
"authors": [
{
"affiliation": null,
"fullName": "Xiang Wang",
"givenName": "Xiang",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Severine Habert",
"givenName": "Severine",
"surname": "Habert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Meng Ma",
"givenName": "Meng",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chun-Hao Huang",
"givenName": "Chun-Hao",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Pascal Fallavollita",
"givenName": "Pascal",
"surname": "Fallavollita",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nassir Navab",
"givenName": "Nassir",
"surname": "Navab",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-09-01T00:00:00",
"pubType": "proceedings",
"pages": "100-103",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7660-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7660a096",
"articleId": "12OmNscOUfD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7660a104",
"articleId": "12OmNA0vnRw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671778",
"title": "Single-shot extrinsic calibration of a generically configured RGB-D camera rig from scene constraints",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671778/12OmNAle6AS",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a072",
"title": "[POSTER] Augmenting Mobile C-arm Fluoroscopes via Stereo-RGBD Sensors for Multimodal Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a072/12OmNBpVQdI",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a060",
"title": "[POSTER] Augmented Reality for Radiation Awareness",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a060/12OmNwD1pNO",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/06977432",
"title": "Calibrating Non-overlapping RGB-D Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977432/12OmNxE2mOA",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/3/07295101",
"title": "3D reconstruction with mirrors and RGB-D cameras",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295101/12OmNxvO01u",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a204",
"title": "[POSTER] Mixed Reality Support for Orthopaedic Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a204/12OmNzUxOf1",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/1/7000a345",
"title": "Multimodal Calibration of Portable X-Ray Capture Systems for 3D Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a345/12OmNzdoN5I",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446213",
"title": "A Calibration Method for On-Vehicle AR-HUD System Using Mixed Reality Glasses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446213/13bd1eNNYnr",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546111",
"title": "Spatial Calibration for Thermal-RGB Cameras and Inertial Sensor System",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546111/17D45XwUAML",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900j466",
"title": "Calibrated RGB-D Salient Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900j466/1yeIpIWFuuI",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCy2L3W",
"title": "Image and Graphics, International Conference on",
"acronym": "icig",
"groupId": "1001790",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCfSqGb",
"doi": "10.1109/ICIG.2007.59",
"title": "A Practical Calibration Method for Multiple Cameras",
"normalizedTitle": "A Practical Calibration Method for Multiple Cameras",
"abstract": "In this paper, we propose a practical factorization-and- position based method for multiple cameras calibration. The method yields a simple calibration means for an arbitrary number of linear projective cameras while maintaining the handiness and flexibility of the original method. A freely moving planar pattern as a calibration object at a few different orientations is only required. All the cameras do not have to see this pattern at all orientations, and only reasonable overlap between camera subgroups is necessary. We divide these cameras into groups according to their positions and orientations first, and then calibrate each camera in the world coordinate system of its own group via a factorization-based method. Common view fields of planar pattern are used to estimate the Euclidean transformation between these world coordinate systems and represent all cameras in a same world coordinate system. Both the intrinsic and extrinsic parameters of cameras can be obtained in a uniform world coordinate system of accuracy to within a pixel.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a practical factorization-and- position based method for multiple cameras calibration. The method yields a simple calibration means for an arbitrary number of linear projective cameras while maintaining the handiness and flexibility of the original method. A freely moving planar pattern as a calibration object at a few different orientations is only required. All the cameras do not have to see this pattern at all orientations, and only reasonable overlap between camera subgroups is necessary. We divide these cameras into groups according to their positions and orientations first, and then calibrate each camera in the world coordinate system of its own group via a factorization-based method. Common view fields of planar pattern are used to estimate the Euclidean transformation between these world coordinate systems and represent all cameras in a same world coordinate system. Both the intrinsic and extrinsic parameters of cameras can be obtained in a uniform world coordinate system of accuracy to within a pixel.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a practical factorization-and- position based method for multiple cameras calibration. The method yields a simple calibration means for an arbitrary number of linear projective cameras while maintaining the handiness and flexibility of the original method. A freely moving planar pattern as a calibration object at a few different orientations is only required. All the cameras do not have to see this pattern at all orientations, and only reasonable overlap between camera subgroups is necessary. We divide these cameras into groups according to their positions and orientations first, and then calibrate each camera in the world coordinate system of its own group via a factorization-based method. Common view fields of planar pattern are used to estimate the Euclidean transformation between these world coordinate systems and represent all cameras in a same world coordinate system. Both the intrinsic and extrinsic parameters of cameras can be obtained in a uniform world coordinate system of accuracy to within a pixel.",
"fno": "29290045",
"keywords": [],
"authors": [
{
"affiliation": "Beijing Institute of Technology, China",
"fullName": "Liuxin Zhang",
"givenName": "Liuxin",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology, China",
"fullName": "Bin Li",
"givenName": "Bin",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology, China",
"fullName": "Yunde Jia",
"givenName": "Yunde",
"surname": "Jia",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icig",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-08-01T00:00:00",
"pubType": "proceedings",
"pages": "45-50",
"year": "2007",
"issn": null,
"isbn": "0-7695-2929-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "29290039",
"articleId": "12OmNqBKTOG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "29290051",
"articleId": "12OmNxb5hsN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2014/5118/0/5118a454",
"title": "Simultaneous Localization and Calibration: Self-Calibration of Consumer Depth Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118a454/12OmNASILPD",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2013/4989/0/4989b027",
"title": "Decoding, Calibration and Rectification for Lenselet-Based Plenoptic Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989b027/12OmNArbG5a",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvs/2006/2506/0/25060024",
"title": "Efficient Vision-Based Calibration for Visual Surveillance Systems with Multiple PTZ Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icvs/2006/25060024/12OmNBBQZup",
"parentPublication": {
"id": "proceedings/icvs/2006/2506/0",
"title": "Fourth IEEE International Conference on Computer Vision Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2000/0662/2/06622520",
"title": "Wide Area Camera Calibration Using Virtual Calibration Objects",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2000/06622520/12OmNBEpnBL",
"parentPublication": {
"id": "proceedings/cvpr/2000/0662/2",
"title": "Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. No.PR00662)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciap/2007/2877/0/28770285",
"title": "D-Calib: Calibration Software for Multiple Cameras System",
"doi": null,
"abstractUrl": "/proceedings-article/iciap/2007/28770285/12OmNC8Mszf",
"parentPublication": {
"id": "proceedings/iciap/2007/2877/0",
"title": "2007 14th International Conference on Image Analysis and Processing - ICIAP 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2000/0750/1/07501354",
"title": "A Practical Self-Calibration Method of Rotating and Zooming Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2000/07501354/12OmNxUv6gz",
"parentPublication": {
"id": "proceedings/icpr/2000/0750/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00220033",
"title": "Linear camera calibration",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220033/12OmNyLiuwo",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477646",
"title": "Geometric calibration for mobile, stereo, autofocus cameras",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477646/12OmNyO8tKJ",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2007/08/i1322",
"title": "A Variational Approach to Problems in Calibration of Multiple Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2007/08/i1322/13rRUEgs2D4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyqRn7h",
"title": "Proceedings. International Symposium on Mixed and Augmented Reality",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvFHfFC",
"doi": "10.1109/ISMAR.2002.1115087",
"title": "Calibration of a Head-Mounted Projective Display for Augmented Reality Systems",
"normalizedTitle": "Calibration of a Head-Mounted Projective Display for Augmented Reality Systems",
"abstract": "In augmented reality (AR) application, registering a virtual object with its real counterpart accurately and comfortably is one of the basic and challenging issues in the sense that the size, depth, geometry, as well as physical attributes of the virtual objects have to be rendered precisely relative to a physical reference, which is well-known as the calibration or registration problem. This paper presents a systematic calibration process to address static registration issue in a custom-designed augmented reality system, which is based upon the recent advancement of head-mounted projective display (HMPD) technology. Following a concise review of the HMPD concept and system configuration, we present in detail a computational model for the system calibration, describe the calibration procedures to obtain the estimations of the unknown transformations, and include the calibration results, evaluation experiments and results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In augmented reality (AR) application, registering a virtual object with its real counterpart accurately and comfortably is one of the basic and challenging issues in the sense that the size, depth, geometry, as well as physical attributes of the virtual objects have to be rendered precisely relative to a physical reference, which is well-known as the calibration or registration problem. This paper presents a systematic calibration process to address static registration issue in a custom-designed augmented reality system, which is based upon the recent advancement of head-mounted projective display (HMPD) technology. Following a concise review of the HMPD concept and system configuration, we present in detail a computational model for the system calibration, describe the calibration procedures to obtain the estimations of the unknown transformations, and include the calibration results, evaluation experiments and results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In augmented reality (AR) application, registering a virtual object with its real counterpart accurately and comfortably is one of the basic and challenging issues in the sense that the size, depth, geometry, as well as physical attributes of the virtual objects have to be rendered precisely relative to a physical reference, which is well-known as the calibration or registration problem. This paper presents a systematic calibration process to address static registration issue in a custom-designed augmented reality system, which is based upon the recent advancement of head-mounted projective display (HMPD) technology. Following a concise review of the HMPD concept and system configuration, we present in detail a computational model for the system calibration, describe the calibration procedures to obtain the estimations of the unknown transformations, and include the calibration results, evaluation experiments and results.",
"fno": "17810176",
"keywords": [],
"authors": [
{
"affiliation": "University of Illinois at Urbana-Champaign",
"fullName": "Hong Hua",
"givenName": "Hong",
"surname": "Hua",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Illinois at Urbana-Champaign",
"fullName": "Chunyu Gao",
"givenName": "Chunyu",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Illinois at Urbana-Champaign",
"fullName": "Narendra Ahuja",
"givenName": "Narendra",
"surname": "Ahuja",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-09-01T00:00:00",
"pubType": "proceedings",
"pages": "176",
"year": "2002",
"issn": null,
"isbn": "0-7695-1781-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "17810169",
"articleId": "12OmNzV70qI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "17810186",
"articleId": "12OmNxdDFS4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2014/2871/0/06802089",
"title": "Quantification of error from system and environmental sources in Optical See-Through head mounted display calibration methods",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802089/12OmNxwncbX",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948481",
"title": "[DEMO] INDICA : Interaction-free display calibration for optical see-through head-mounted displays based on 3D eye localization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948481/12OmNy1SFEx",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2002/1492/0/14920081",
"title": "A Testbed for Precise Registration, Natural Occlusion and Interaction in an Augmented Environment Using a Head-Mounted Projective Display (HMPD)",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920081/12OmNylboC4",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2003/1882/0/18820053",
"title": "Easy Calibration of a Head-Mounted Projective Display for Augmented Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2003/18820053/12OmNyuyaeJ",
"parentPublication": {
"id": "proceedings/vr/2003/1882/0",
"title": "Proceedings IEEE Virtual Reality 2003",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2004/2191/0/21910070",
"title": "Display-Relative Calibration for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2004/21910070/12OmNzA6GPh",
"parentPublication": {
"id": "proceedings/ismar/2004/2191/0",
"title": "Third IEEE and ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isar/2001/1375/0/13750217",
"title": "Using a Head-Mounted Projective Display in Interactive Augmented Environments",
"doi": null,
"abstractUrl": "/proceedings-article/isar/2001/13750217/12OmNzV70Da",
"parentPublication": {
"id": "proceedings/isar/2001/1375/0",
"title": "Proceedings IEEE and ACM International Symposium on Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2011/2183/0/06162910",
"title": "An empiric evaluation of confirmation methods for optical see-through head-mounted display calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2011/06162910/12OmNzwpUfP",
"parentPublication": {
"id": "proceedings/ismar/2011/2183/0",
"title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07021939",
"title": "Subjective Evaluation of a Semi-Automatic Optical See-Through Head-Mounted Display Calibration Technique",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07021939/13rRUwInvyB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300e452",
"title": "Assessment of Optical See-Through Head Mounted Display Calibration for Interactive Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300e452/1i5mlch2zny",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzVGcIy",
"title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "1997",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx76TJI",
"doi": "10.1109/CVPR.1997.609324",
"title": "Calibration of a Structured Light System: A Projective Approach",
"normalizedTitle": "Calibration of a Structured Light System: A Projective Approach",
"abstract": "We present in this paper a novel calibration method that uses cross ratio to compute world points falling onto any given light stripe plane of a structured light system. We show that, by using 4 known non-coplanar sets of 3 collinear world points, the direct 4 x 3 image to-world transformation matrix for each light stripe plane can also be recovered from plane-to-plane homography. Preliminary experiments conducted with a calibration target and a mannequin suggest that this novel calibration method is robust and is applicable to many shape measurement tasks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present in this paper a novel calibration method that uses cross ratio to compute world points falling onto any given light stripe plane of a structured light system. We show that, by using 4 known non-coplanar sets of 3 collinear world points, the direct 4 x 3 image to-world transformation matrix for each light stripe plane can also be recovered from plane-to-plane homography. Preliminary experiments conducted with a calibration target and a mannequin suggest that this novel calibration method is robust and is applicable to many shape measurement tasks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present in this paper a novel calibration method that uses cross ratio to compute world points falling onto any given light stripe plane of a structured light system. We show that, by using 4 known non-coplanar sets of 3 collinear world points, the direct 4 x 3 image to-world transformation matrix for each light stripe plane can also be recovered from plane-to-plane homography. Preliminary experiments conducted with a calibration target and a mannequin suggest that this novel calibration method is robust and is applicable to many shape measurement tasks.",
"fno": "78220225",
"keywords": [],
"authors": [
{
"affiliation": "Signal Processing Research Institute",
"fullName": "D. Huynh",
"givenName": "D.",
"surname": "Huynh",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1997-06-01T00:00:00",
"pubType": "proceedings",
"pages": "225",
"year": "1997",
"issn": "1063-6919",
"isbn": "0-8186-7822-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "78220219",
"articleId": "12OmNC4eSDP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "78220231",
"articleId": "12OmNzWOB95",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyjLoRw",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxIRxTh",
"doi": "10.1109/ISMAR.2014.6948417",
"title": "Comprehensive workspace calibration for visuo-haptic augmented reality",
"normalizedTitle": "Comprehensive workspace calibration for visuo-haptic augmented reality",
"abstract": "Visuo-haptic augmented reality systems enable users to see and touch digital information that is embedded in the real world. Precise co-location of computer graphics and the haptic stylus is necessary to provide a realistic user experience. PHANToM haptic devices are often used in such systems to provide haptic feedback. They consist of two interlinked joints, whose angles define the position of the haptic stylus and three sensors at the gimbal to sense its orientation. Previous work has focused on calibration procedures that align the haptic workspace within a global reference coordinate system and developing algorithms that compensate the non-linear position error, caused by inaccuracies in the joint angle sensors. In this paper, we present an improved workspace calibration that additionally compensates for errors in the gimbal sensors. This enables us to also align the orientation of the haptic stylus with high precision. To reduce the required time for calibration and to increase the sampling coverage, we utilize time-delay estimation to temporally align external sensor readings. This enables users to continuously move the haptic stylus during the calibration process, as opposed to commonly used point and hold processes. We conducted an evaluation of the calibration procedure for visuo-haptic augmented reality setups with two different PHANToMs and two different optical trackers. Our results show a significant improvement of orientation alignment for both setups over the previous state of the art calibration procedure. Improved position and orientation accuracy results in higher fidelity visual and haptic augmentations, which is crucial for fine-motor tasks in areas including medical training simulators, assembly planning tools, or rapid prototyping applications. A user friendly calibration procedure is essential for real-world applications of VHAR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visuo-haptic augmented reality systems enable users to see and touch digital information that is embedded in the real world. Precise co-location of computer graphics and the haptic stylus is necessary to provide a realistic user experience. PHANToM haptic devices are often used in such systems to provide haptic feedback. They consist of two interlinked joints, whose angles define the position of the haptic stylus and three sensors at the gimbal to sense its orientation. Previous work has focused on calibration procedures that align the haptic workspace within a global reference coordinate system and developing algorithms that compensate the non-linear position error, caused by inaccuracies in the joint angle sensors. In this paper, we present an improved workspace calibration that additionally compensates for errors in the gimbal sensors. This enables us to also align the orientation of the haptic stylus with high precision. To reduce the required time for calibration and to increase the sampling coverage, we utilize time-delay estimation to temporally align external sensor readings. This enables users to continuously move the haptic stylus during the calibration process, as opposed to commonly used point and hold processes. We conducted an evaluation of the calibration procedure for visuo-haptic augmented reality setups with two different PHANToMs and two different optical trackers. Our results show a significant improvement of orientation alignment for both setups over the previous state of the art calibration procedure. Improved position and orientation accuracy results in higher fidelity visual and haptic augmentations, which is crucial for fine-motor tasks in areas including medical training simulators, assembly planning tools, or rapid prototyping applications. A user friendly calibration procedure is essential for real-world applications of VHAR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visuo-haptic augmented reality systems enable users to see and touch digital information that is embedded in the real world. Precise co-location of computer graphics and the haptic stylus is necessary to provide a realistic user experience. PHANToM haptic devices are often used in such systems to provide haptic feedback. They consist of two interlinked joints, whose angles define the position of the haptic stylus and three sensors at the gimbal to sense its orientation. Previous work has focused on calibration procedures that align the haptic workspace within a global reference coordinate system and developing algorithms that compensate the non-linear position error, caused by inaccuracies in the joint angle sensors. In this paper, we present an improved workspace calibration that additionally compensates for errors in the gimbal sensors. This enables us to also align the orientation of the haptic stylus with high precision. To reduce the required time for calibration and to increase the sampling coverage, we utilize time-delay estimation to temporally align external sensor readings. This enables users to continuously move the haptic stylus during the calibration process, as opposed to commonly used point and hold processes. We conducted an evaluation of the calibration procedure for visuo-haptic augmented reality setups with two different PHANToMs and two different optical trackers. Our results show a significant improvement of orientation alignment for both setups over the previous state of the art calibration procedure. Improved position and orientation accuracy results in higher fidelity visual and haptic augmentations, which is crucial for fine-motor tasks in areas including medical training simulators, assembly planning tools, or rapid prototyping applications. A user friendly calibration procedure is essential for real-world applications of VHAR.",
"fno": "06948417",
"keywords": [
"Calibration",
"Haptic Interfaces",
"Sensors",
"Joints",
"Phantoms",
"Target Tracking",
"Visualization",
"User Interfaces Haptic I O",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial",
"Augmented And Virtual Realities",
"H 5 2 Information Interfaces And Presentation"
],
"authors": [
{
"affiliation": null,
"fullName": "Ulrich Eck",
"givenName": "Ulrich",
"surname": "Eck",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Frieder Pankratz",
"givenName": "Frieder",
"surname": "Pankratz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Christian Sandor",
"givenName": "Christian",
"surname": "Sandor",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gudrun Klinker",
"givenName": "Gudrun",
"surname": "Klinker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hamid Laga",
"givenName": "Hamid",
"surname": "Laga",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "123-128",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6184-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06948416",
"articleId": "12OmNyqRnma",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06948418",
"articleId": "12OmNwtEEOk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2007/3005/0/04390938",
"title": "MHaptic : a Haptic Manipulation Library for Generic Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2007/04390938/12OmNASraZe",
"parentPublication": {
"id": "proceedings/cw/2007/3005/0",
"title": "2007 International Conference on Cyberworlds (CW'07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948475",
"title": "[DEMO] Comprehensive workspace calibration for visuo-haptic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948475/12OmNBBzocN",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504708",
"title": "Exploring the perception of co-location errors during tool interaction in visuo-haptic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504708/12OmNrAMENI",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2007/3005/0/30050003",
"title": "Visuo-Haptic Interface for Hair",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2007/30050003/12OmNwEJ12y",
"parentPublication": {
"id": "proceedings/cw/2007/3005/0",
"title": "2007 International Conference on Cyberworlds (CW'07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444753",
"title": "Evaluating haptic feedback in virtual environments using ISO 9241 -- 9",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444753/12OmNyRPgrg",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/04/tth2011040321",
"title": "Collocation Accuracy of Visuo-Haptic System: Metrics and Calibration",
"doi": null,
"abstractUrl": "/journal/th/2011/04/tth2011040321/13rRUxASuhM",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/01/ttg2013010159",
"title": "Visuo-Haptic Mixed Reality with Unobstructed Tool-Hand Integration",
"doi": null,
"abstractUrl": "/journal/tg/2013/01/ttg2013010159/13rRUyeTVi1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/12/07272121",
"title": "Precise Haptic Device Co-Location for Visuo-Haptic Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2015/12/07272121/13rRUygT7fe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartiot/2020/6514/0/09191998",
"title": "Dynamic and Accurate Force Feedback for Electromagnetic Haptic Display",
"doi": null,
"abstractUrl": "/proceedings-article/smartiot/2020/09191998/1n0Iu1Fpx1m",
"parentPublication": {
"id": "proceedings/smartiot/2020/6514/0",
"title": "2020 IEEE International Conference on Smart Internet of Things (SmartIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09665216",
"title": "HaptoMapping: Visuo-Haptic Augmented Reality by Embedding User-Imperceptible Tactile Display Control Signals in a Projected Image",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09665216/1zJiKwg69PO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzTH0GA",
"title": "Applications of Computer Vision and the IEEE Workshop on Motion and Video Computing, IEEE Workshop on",
"acronym": "wacv-motion",
"groupId": "1000040",
"volume": "1",
"displayVolume": "1",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzSyCbA",
"doi": "10.1109/ACVMOT.2005.102",
"title": "Requirements for Camera Calibration: Must Accuracy Come with a High Price?",
"normalizedTitle": "Requirements for Camera Calibration: Must Accuracy Come with a High Price?",
"abstract": "Since a large number of vision applications rely on the mapping between 3D scenes and their corresponding 2D camera images, an important practical consideration for researchers is, what are the major determinants of camera calibration accuracy and what accuracy can be achieved within the practical limits of their environments. In response, we present a thorough study investigating the effects of training data quantity, measurement error, pixel coordinate noise, and the choice of camera model, on camera calibration results. Through this effort, we seek to determine whether expensive, elaborate setups are necessary, or indeed, beneficial, to camera calibration, and whether a high complexity camera model leads to improved accuracy. The results are first provided for a simulated camera system and then verified through carefully controlled experiments using real-world measurements.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Since a large number of vision applications rely on the mapping between 3D scenes and their corresponding 2D camera images, an important practical consideration for researchers is, what are the major determinants of camera calibration accuracy and what accuracy can be achieved within the practical limits of their environments. In response, we present a thorough study investigating the effects of training data quantity, measurement error, pixel coordinate noise, and the choice of camera model, on camera calibration results. Through this effort, we seek to determine whether expensive, elaborate setups are necessary, or indeed, beneficial, to camera calibration, and whether a high complexity camera model leads to improved accuracy. The results are first provided for a simulated camera system and then verified through carefully controlled experiments using real-world measurements.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Since a large number of vision applications rely on the mapping between 3D scenes and their corresponding 2D camera images, an important practical consideration for researchers is, what are the major determinants of camera calibration accuracy and what accuracy can be achieved within the practical limits of their environments. In response, we present a thorough study investigating the effects of training data quantity, measurement error, pixel coordinate noise, and the choice of camera model, on camera calibration results. Through this effort, we seek to determine whether expensive, elaborate setups are necessary, or indeed, beneficial, to camera calibration, and whether a high complexity camera model leads to improved accuracy. The results are first provided for a simulated camera system and then verified through carefully controlled experiments using real-world measurements.",
"fno": "227110356",
"keywords": [],
"authors": [
{
"affiliation": "McGill University, Montreal, Canada",
"fullName": "Wei Sun",
"givenName": "Wei",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "McGill University, Montreal, Canada",
"fullName": "Jeremy R. Cooperstock",
"givenName": "Jeremy R.",
"surname": "Cooperstock",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv-motion",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-01-01T00:00:00",
"pubType": "proceedings",
"pages": "356-361",
"year": "2005",
"issn": null,
"isbn": "0-7695-2271-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "227110350",
"articleId": "12OmNwDSdGB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "227110364",
"articleId": "12OmNyvY9zS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icig/2007/2929/0/29290007",
"title": "Calibration Accuracy Evaluation with Stereo Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2007/29290007/12OmNAlNiK6",
"parentPublication": {
"id": "proceedings/icig/2007/2929/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2003/2006/0/20060151",
"title": "Miniaturization, Calibration & Accuracy Evaluation of a Hybrid Self-Tracker",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2003/20060151/12OmNBhpS86",
"parentPublication": {
"id": "proceedings/ismar/2003/2006/0",
"title": "The Second IEEE and ACM International Symposium on Mixed and Augmented Reality, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2003/1950/2/195021418",
"title": "Camera Calibration with Known Rotation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2003/195021418/12OmNCwlaky",
"parentPublication": {
"id": "proceedings/iccv/2003/1950/2",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2009/3887/0/pid978796",
"title": "Automatic Calibration Method Based on Traditional Camera Calibration Approach",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2009/pid978796/12OmNyL0TsP",
"parentPublication": {
"id": "proceedings/icise/2009/3887/0",
"title": "Information Science and Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223181",
"title": "Accuracy assessment on camera calibration method not considering lens distortion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223181/12OmNzsrwf5",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2009/02/ttp2009020376",
"title": "High-Accuracy and Robust Localization of Large Control Markers for Geometric Camera Calibration",
"doi": null,
"abstractUrl": "/journal/tp/2009/02/ttp2009020376/13rRUwwJWGT",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1992/10/i0965",
"title": "Camera Calibration with Distortion Models and Accuracy Evaluation",
"doi": null,
"abstractUrl": "/journal/tp/1992/10/i0965/13rRUyeTViU",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iarce/2022/5440/0/544000a024",
"title": "A High-accuracy Camera Calibration Method Based on Special Circular Target",
"doi": null,
"abstractUrl": "/proceedings-article/iarce/2022/544000a024/1KOvjxKFLaw",
"parentPublication": {
"id": "proceedings/iarce/2022/5440/0",
"title": "2022 International Conference on Industrial Automation, Robotics and Control Engineering (IARCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a414",
"title": "Spatial-Temporal Codec Accuracy Calibration for Multi-scale Giga-Pixel Macroscope",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a414/1cJ0Bjly2wo",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwseER6",
"title": "Conference Record of the Twenty-Fifth Asilomar Conference on Signals, Systems & Computers",
"acronym": "acssc",
"groupId": "1000671",
"volume": "0",
"displayVolume": "0",
"year": "1991",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzZWbHr",
"doi": "10.1109/ACSSC.1991.186575",
"title": "Optimum active array shape calibration",
"normalizedTitle": "Optimum active array shape calibration",
"abstract": "The authors examine several array shape calibration algorithms based on maximum likelihood and eigenstructure methods using known calibration source signals. A compact expression for the corresponding Cramer-Rao bound (CRB) on the sensor location parameters is presented. For uncorrelated calibrating sources, a necessary and sufficient condition for the optimal choice of bearings that minimize the CRB is derived. Asymptotic covariances of the estimation errors for the calibration algorithms are also presented and compared with the CRB.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The authors examine several array shape calibration algorithms based on maximum likelihood and eigenstructure methods using known calibration source signals. A compact expression for the corresponding Cramer-Rao bound (CRB) on the sensor location parameters is presented. For uncorrelated calibrating sources, a necessary and sufficient condition for the optimal choice of bearings that minimize the CRB is derived. Asymptotic covariances of the estimation errors for the calibration algorithms are also presented and compared with the CRB.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The authors examine several array shape calibration algorithms based on maximum likelihood and eigenstructure methods using known calibration source signals. A compact expression for the corresponding Cramer-Rao bound (CRB) on the sensor location parameters is presented. For uncorrelated calibrating sources, a necessary and sufficient condition for the optimal choice of bearings that minimize the CRB is derived. Asymptotic covariances of the estimation errors for the calibration algorithms are also presented and compared with the CRB.",
"fno": "00186575",
"keywords": [
"Antenna Arrays",
"Calibration",
"Eigenvalues And Eigenfunctions",
"Signal Processing",
"Asymptotic Covariances",
"MEM",
"Maximum Likelihood Method",
"Active Array",
"Array Shape Calibration Algorithms",
"Eigenstructure Methods",
"Calibration Source Signals",
"Cramer Rao Bound",
"CRB",
"Sensor Location Parameters",
"Uncorrelated Calibrating Sources",
"Bearings",
"Estimation Errors",
"Shape",
"Calibration",
"Sensor Arrays",
"Stochastic Processes",
"Lakes",
"Signal Processing",
"Sonar Equipment",
"Array Signal Processing",
"Force Sensors",
"USA Councils"
],
"authors": [
{
"affiliation": "Defence Sci. Organ., Singapore",
"fullName": "B.C. Ng",
"givenName": "B.C.",
"surname": "Ng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "A. Nehorai",
"givenName": "A.",
"surname": "Nehorai",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acssc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1991-01-01T00:00:00",
"pubType": "proceedings",
"pages": "893,894,895,896,897",
"year": "1991",
"issn": "1058-6393",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00186574",
"articleId": "12OmNxaeu1Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00186576",
"articleId": "12OmNAle6WG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/1988/9999/0/00197201",
"title": "Eigenstructure methods for direction finding with sensor gain and phase uncertainties",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00197201/12OmNqBbHCC",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1988/9999/0/00197264",
"title": "Calibration-free bearing estimation for arrays with randomly perturbed sensor locations",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00197264/12OmNvnfkhB",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iitsi/2010/4020/0/4020a226",
"title": "A PSO Based Array Shape Calibration Utilizing Cyclostationarity",
"doi": null,
"abstractUrl": "/proceedings-article/iitsi/2010/4020a226/12OmNwFzO2e",
"parentPublication": {
"id": "proceedings/iitsi/2010/4020/0",
"title": "Intelligent Information Technology and Security Informatics, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1993/0946/4/00319584",
"title": "Active array sensor location calibration",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1993/00319584/12OmNwtn3t8",
"parentPublication": {
"id": "proceedings/icassp/1993/0946/4",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1996/7258/0/72580708",
"title": "Active Intrinsic Calibration Using Vanishing Points",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1996/72580708/12OmNx8OuqK",
"parentPublication": {
"id": "proceedings/cvpr/1996/7258/0",
"title": "Proceedings CVPR IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150095",
"title": "Eigenstructure approach for array processing and calibration with general phase and gain perturbations",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150095/12OmNxwENvA",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1993/0946/4/00319658",
"title": "A robust numerical approach for array calibration",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1993/00319658/12OmNyKJids",
"parentPublication": {
"id": "proceedings/icassp/1993/0946/4",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1988/9999/2/00754613",
"title": "Direction Finding In The Presence Of Mutual Coupling",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1988/00754613/12OmNyo1nLN",
"parentPublication": {
"id": "proceedings/acssc/1988/9999/2",
"title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1989/30/2/01201033",
"title": "Array shape calibration using eigenstructure methods",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1989/01201033/12OmNyoiZ0b",
"parentPublication": {
"id": "proceedings/acssc/1989/30/2",
"title": "Twenty-Third Asilomar Conference on Signals, Systems and Computers, 1989.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsap/2009/3594/0/05163860",
"title": "A Fully Digital Background Calibration Technique for Pipeline Analog-to-Digital Converters",
"doi": null,
"abstractUrl": "/proceedings-article/icsap/2009/05163860/12OmNzb7Zm8",
"parentPublication": {
"id": "proceedings/icsap/2009/3594/0",
"title": "Signal Acquisition and Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1hQqfuoOyHu",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hVlBp1zhpm",
"doi": "10.1109/ICCV.2019.00158",
"title": "Calibration Wizard: A Guidance System for Camera Calibration Based on Modelling Geometric and Corner Uncertainty",
"normalizedTitle": "Calibration Wizard: A Guidance System for Camera Calibration Based on Modelling Geometric and Corner Uncertainty",
"abstract": "It is well known that the accuracy of a calibration depends strongly on the choice of camera poses from which images of a calibration object are acquired. We present a system - Calibration Wizard - that interactively guides a user towards taking optimal calibration images. For each new image to be taken, the system computes, from all previously acquired images, the pose that leads to the globally maximum reduction of expected uncertainty on intrinsic parameters and then guides the user towards that pose. We also show how to incorporate uncertainty in corner point position in a novel principled manner, for both, calibration and computation of the next best pose. Synthetic and real-world experiments are performed to demonstrate the effectiveness of Calibration Wizard.",
"abstracts": [
{
"abstractType": "Regular",
"content": "It is well known that the accuracy of a calibration depends strongly on the choice of camera poses from which images of a calibration object are acquired. We present a system - Calibration Wizard - that interactively guides a user towards taking optimal calibration images. For each new image to be taken, the system computes, from all previously acquired images, the pose that leads to the globally maximum reduction of expected uncertainty on intrinsic parameters and then guides the user towards that pose. We also show how to incorporate uncertainty in corner point position in a novel principled manner, for both, calibration and computation of the next best pose. Synthetic and real-world experiments are performed to demonstrate the effectiveness of Calibration Wizard.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "It is well known that the accuracy of a calibration depends strongly on the choice of camera poses from which images of a calibration object are acquired. We present a system - Calibration Wizard - that interactively guides a user towards taking optimal calibration images. For each new image to be taken, the system computes, from all previously acquired images, the pose that leads to the globally maximum reduction of expected uncertainty on intrinsic parameters and then guides the user towards that pose. We also show how to incorporate uncertainty in corner point position in a novel principled manner, for both, calibration and computation of the next best pose. Synthetic and real-world experiments are performed to demonstrate the effectiveness of Calibration Wizard.",
"fno": "480300b497",
"keywords": [
"Calibration",
"Cameras",
"Computer Vision",
"Geometry",
"Camera Calibration",
"Geometric Uncertainty",
"Corner Uncertainty",
"Camera Poses",
"Corner Point Position",
"Guidance System",
"Calibration Wizard",
"Intrinsic Parameters",
"Calibration",
"Cameras",
"Uncertainty",
"Covariance Matrices",
"Three Dimensional Displays",
"Optimization",
"Computational Modeling"
],
"authors": [
{
"affiliation": "ETH Zurich",
"fullName": "Songyou Peng",
"givenName": "Songyou",
"surname": "Peng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "INRIA Grenoble Rhone-Alpes",
"fullName": "Peter Sturm",
"givenName": "Peter",
"surname": "Sturm",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1497-1505",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4803-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "480300b486",
"articleId": "1hVlaOOiw92",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "480300b506",
"articleId": "1hVlDO1gYuY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a182",
"title": "[POSTER] Efficient Pose Selection for Interactive Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a182/12OmNALUow1",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2013/5053/0/06475056",
"title": "Geometric calibration for a multi-camera-projector system",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2013/06475056/12OmNvBrgGd",
"parentPublication": {
"id": "proceedings/wacv/2013/5053/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imvip/2011/0230/0/06167880",
"title": "Comparison of Camera Calibration Techniques for a Portable Mobile Mapping System",
"doi": null,
"abstractUrl": "/proceedings-article/imvip/2011/06167880/12OmNwcCIXS",
"parentPublication": {
"id": "proceedings/imvip/2011/0230/0",
"title": "2011 Irish Machine Vision and Image Processing Conference (IMVIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032f354",
"title": "Deltille Grids for Geometric Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032f354/12OmNxwENP8",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a957",
"title": "Corner-Based Geometric Calibration of Multi-focus Plenoptic Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a957/12OmNy5R3sS",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477646",
"title": "Geometric calibration for mobile, stereo, autofocus cameras",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477646/12OmNyO8tKJ",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a031",
"title": "Efficient Pose Selection for Interactive Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a031/17D45XH89n0",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icst/2021/6836/0/683600a436",
"title": "Uncertainty-Wizard: Fast and User-Friendly Neural Network Uncertainty Quantification",
"doi": null,
"abstractUrl": "/proceedings-article/icst/2021/683600a436/1tRPbwVMRoc",
"parentPublication": {
"id": "proceedings/icst/2021/6836/0",
"title": "2021 14th IEEE Conference on Software Testing, Verification and Validation (ICST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoias/2021/4195/0/419500a091",
"title": "A Weighted Extrinsic Calibration of a Camera and a LRF",
"doi": null,
"abstractUrl": "/proceedings-article/icoias/2021/419500a091/1wG6axOHToI",
"parentPublication": {
"id": "proceedings/icoias/2021/4195/0",
"title": "2021 4th International Conference on Intelligent Autonomous Systems (ICoIAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900k0119",
"title": "Post-hoc Uncertainty Calibration for Domain Drift Scenarios",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900k0119/1yeJvRAEKvC",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqBtiEQ",
"title": "Proceedings ICIAP 2001. 11th International Conference on Image Analysis and Processing",
"acronym": "iciap",
"groupId": "1000346",
"volume": "0",
"displayVolume": "0",
"year": "2001",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAXxWUB",
"doi": "10.1109/ICIAP.2001.957075",
"title": "Contextual Color Quantization Algorithm",
"normalizedTitle": "Contextual Color Quantization Algorithm",
"abstract": "Abstract: In this paper, we propose a heuristic approach to color quantize images with contextual information taken into consideration. The idea is to locate the regions of an image having the greatest need for colors, and allocate more quantization levels to them. We achieve this by scanning the elements of the input image in a way determined by their local intensity and select the color representatives that comprise the colormap according to their local popularity. The overall performance of the color quantization algorithm is evaluated on a representative set of artificial and real world images. The results show a significant image quality improvement compared to some of the other color quantization schemes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract: In this paper, we propose a heuristic approach to color quantize images with contextual information taken into consideration. The idea is to locate the regions of an image having the greatest need for colors, and allocate more quantization levels to them. We achieve this by scanning the elements of the input image in a way determined by their local intensity and select the color representatives that comprise the colormap according to their local popularity. The overall performance of the color quantization algorithm is evaluated on a representative set of artificial and real world images. The results show a significant image quality improvement compared to some of the other color quantization schemes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract: In this paper, we propose a heuristic approach to color quantize images with contextual information taken into consideration. The idea is to locate the regions of an image having the greatest need for colors, and allocate more quantization levels to them. We achieve this by scanning the elements of the input image in a way determined by their local intensity and select the color representatives that comprise the colormap according to their local popularity. The overall performance of the color quantization algorithm is evaluated on a representative set of artificial and real world images. The results show a significant image quality improvement compared to some of the other color quantization schemes.",
"fno": "11830596",
"keywords": [],
"authors": [
{
"affiliation": "The Hong Kong Polytechnic University",
"fullName": "M.P. Yu",
"givenName": "M.P.",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Hong Kong Polytechnic University",
"fullName": "K.C. Lo",
"givenName": "K.C.",
"surname": "Lo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iciap",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2001-09-01T00:00:00",
"pubType": "proceedings",
"pages": "0596",
"year": "2001",
"issn": null,
"isbn": "0-7695-1183-X",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "11830589",
"articleId": "12OmNwGZNRN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "11830602",
"articleId": "12OmNyLiuF0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvpw7he",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "1",
"displayVolume": "2",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrIJqqR",
"doi": "10.1109/ICPR.2004.1334251",
"title": "Fast Color Image Quantization using Squared Euclidean Distance of Adjacent Color Points along the Highest Color Variance Axis",
"normalizedTitle": "Fast Color Image Quantization using Squared Euclidean Distance of Adjacent Color Points along the Highest Color Variance Axis",
"abstract": "A new color image quantization algorithm that uses the squared Euclidean distance of adjacent color points along the highest color variance axis is proposed. This algorithm is a hierarchically divisive colormap design technique. Colors are sorted along the axis with the highest variance of color distribution. The squared Euclidean distances between any adjacent colors' along the axis are then used to find the cutting plane that divides a color cell into two subcells with approximately equal quantization errors respect to their centroids. The proposed algorithm is effective and yields a short execution time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A new color image quantization algorithm that uses the squared Euclidean distance of adjacent color points along the highest color variance axis is proposed. This algorithm is a hierarchically divisive colormap design technique. Colors are sorted along the axis with the highest variance of color distribution. The squared Euclidean distances between any adjacent colors' along the axis are then used to find the cutting plane that divides a color cell into two subcells with approximately equal quantization errors respect to their centroids. The proposed algorithm is effective and yields a short execution time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A new color image quantization algorithm that uses the squared Euclidean distance of adjacent color points along the highest color variance axis is proposed. This algorithm is a hierarchically divisive colormap design technique. Colors are sorted along the axis with the highest variance of color distribution. The squared Euclidean distances between any adjacent colors' along the axis are then used to find the cutting plane that divides a color cell into two subcells with approximately equal quantization errors respect to their centroids. The proposed algorithm is effective and yields a short execution time.",
"fno": "212810656",
"keywords": [],
"authors": [
{
"affiliation": "National Institute of Development Administration, Bangkok, Thailand",
"fullName": "Y. Sirisathitkul",
"givenName": "Y.",
"surname": "Sirisathitkul",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Institute of Development Administration, Bangkok, Thailand",
"fullName": "S. Auwatanamongkol",
"givenName": "S.",
"surname": "Auwatanamongkol",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sirindhorn International Institute of Technology, Thammasat University, Bangkok, Thailand",
"fullName": "B. Uyyanonvara",
"givenName": "B.",
"surname": "Uyyanonvara",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-08-01T00:00:00",
"pubType": "proceedings",
"pages": "656-659",
"year": "2004",
"issn": "1051-4651",
"isbn": "0-7695-2128-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "212810652",
"articleId": "12OmNzVoBAF",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "212810660",
"articleId": "12OmNy1SFF3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/1994/1775/2/00389554",
"title": "Greedy tree growing for color image quantization",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1994/00389554/12OmNANkonW",
"parentPublication": {
"id": "proceedings/icassp/1994/1775/2",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2014/7978/0/7978a606",
"title": "Does Dehazing Model Preserve Color Information?",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2014/7978a606/12OmNBRbku9",
"parentPublication": {
"id": "proceedings/sitis/2014/7978/0",
"title": "2014 Tenth International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a298",
"title": "Beyond White: Ground Truth Colors for Color Constancy Correction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a298/12OmNqBKTY8",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1998/8295/0/82950839",
"title": "Bilateral Filtering for Gray and Color Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1998/82950839/12OmNrAv3LF",
"parentPublication": {
"id": "proceedings/iccv/1998/8295/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/2/215820946",
"title": "Color Lines: Image Specific Color Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/215820946/12OmNrJAdNK",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/2",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206645",
"title": "Color estimation from a single surface color",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206645/12OmNwFidbA",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/2/01315267",
"title": "Color lines: image specific color representation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315267/12OmNwKoZgv",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/2",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichit/2006/2674/1/267410409",
"title": "A Color Correction System using a Color Compensation Chart",
"doi": null,
"abstractUrl": "/proceedings-article/ichit/2006/267410409/12OmNwvDQuT",
"parentPublication": {
"id": "proceedings/ichit/2006/2674/1",
"title": "2006 International Conference on Hybrid Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00341073",
"title": "Temporal-color space analysis of reflection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00341073/12OmNxGja4w",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichit/2006/2674/1/04021123",
"title": "A Color Correction System using a Color Compensation Chart",
"doi": null,
"abstractUrl": "/proceedings-article/ichit/2006/04021123/12OmNxWLTDY",
"parentPublication": {
"id": "proceedings/ichit/2006/2674/1",
"title": "2006 International Conference on Hybrid Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzYeB3H",
"title": "11th IAPR International Conference on Pattern Recognition. Vol. III. Conference C: Image, Speech and Signal Analysis,",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "1992",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvF83ne",
"doi": "10.1109/ICPR.1992.201921",
"title": "A color video image quantization method with stable and efficient color selection capability",
"normalizedTitle": "A color video image quantization method with stable and efficient color selection capability",
"abstract": "Color video images have become a very important media in communication. This has increased the necessity of displaying and handling color video images on various types of computers. In computerized color image processing, most color images are represented by 24 bits per pixel. Such images usually contain a lot of redundancy and require a large amount of space to be stored. Furthermore, they require expensive full color display devices, so that many general-purpose computers which have only colormap display devices are not capable of displaying them. In order to lower the display and the storage cost, color image quantization algorithms are needed to reduce the number of colors in original images. The authors propose a color quantization method for video images which efficiently generates color quantized video images with stable color allocations.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Color video images have become a very important media in communication. This has increased the necessity of displaying and handling color video images on various types of computers. In computerized color image processing, most color images are represented by 24 bits per pixel. Such images usually contain a lot of redundancy and require a large amount of space to be stored. Furthermore, they require expensive full color display devices, so that many general-purpose computers which have only colormap display devices are not capable of displaying them. In order to lower the display and the storage cost, color image quantization algorithms are needed to reduce the number of colors in original images. The authors propose a color quantization method for video images which efficiently generates color quantized video images with stable color allocations.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Color video images have become a very important media in communication. This has increased the necessity of displaying and handling color video images on various types of computers. In computerized color image processing, most color images are represented by 24 bits per pixel. Such images usually contain a lot of redundancy and require a large amount of space to be stored. Furthermore, they require expensive full color display devices, so that many general-purpose computers which have only colormap display devices are not capable of displaying them. In order to lower the display and the storage cost, color image quantization algorithms are needed to reduce the number of colors in original images. The authors propose a color quantization method for video images which efficiently generates color quantized video images with stable color allocations.",
"fno": "00201921",
"keywords": [
"Data Compression",
"Image Coding",
"Video Signals",
"Display Cost",
"Image Coding",
"Data Compression",
"Color Video Image Quantization Method",
"Color Selection Capability",
"Computerized Color Image Processing",
"Storage Cost",
"Stable Color Allocations",
"Quantization",
"Computer Displays",
"Color",
"Layout",
"Pixel",
"Image Storage",
"Image Generation",
"Communication Industry",
"Costs",
"Cameras"
],
"authors": [
{
"affiliation": "Inst. of Ind. Sci., Tokyo Univ., Japan",
"fullName": "Y. Gong",
"givenName": "Y.",
"surname": "Gong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. of Ind. Sci., Tokyo Univ., Japan",
"fullName": "H. Zen",
"givenName": "H.",
"surname": "Zen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. of Ind. Sci., Tokyo Univ., Japan",
"fullName": "Y. Ohsawa",
"givenName": "Y.",
"surname": "Ohsawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. of Ind. Sci., Tokyo Univ., Japan",
"fullName": "M. Sakauchi",
"givenName": "M.",
"surname": "Sakauchi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1992-01-01T00:00:00",
"pubType": "proceedings",
"pages": "33-36",
"year": "1992",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00201920",
"articleId": "12OmNzV70yT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00201922",
"articleId": "12OmNxdm4JN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/1994/1775/2/00389554",
"title": "Greedy tree growing for color image quantization",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1994/00389554/12OmNANkonW",
"parentPublication": {
"id": "proceedings/icassp/1994/1775/2",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1994/1775/2/00389475",
"title": "Address predictive color quantization image compression for multimedia applications",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1994/00389475/12OmNAoUTrq",
"parentPublication": {
"id": "proceedings/icassp/1994/1775/2",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/1/01394205",
"title": "Restoring halftoned color-quantized images with simulated annealing",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394205/12OmNqAU6vU",
"parentPublication": {
"id": "proceedings/icme/2004/8603/1",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031f092",
"title": "Color Image Retrieval Based on Vector Quantization",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031f092/12OmNwnYG1K",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1998/8821/1/882110181",
"title": "The mathematics of color calibration",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1998/882110181/12OmNxGja6s",
"parentPublication": {
"id": "proceedings/icip/1998/8821/1",
"title": "Proceedings of IPCIP'98 International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1988/9999/0/00196694",
"title": "Subband coding of color images using finite state vector quantization",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00196694/12OmNyQ7FVP",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iai/2004/8387/0/01300947",
"title": "Using inverse image frequency for perception-based color image quantization",
"doi": null,
"abstractUrl": "/proceedings-article/iai/2004/01300947/12OmNyaoDF1",
"parentPublication": {
"id": "proceedings/iai/2004/8387/0",
"title": "2004 Southwest Symposium on Image Analysis and Interpretation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifita/2009/3600/1/3600a135",
"title": "Adaptive Color Quantization Based on Self-Growing Network",
"doi": null,
"abstractUrl": "/proceedings-article/ifita/2009/3600a135/12OmNzR8CwG",
"parentPublication": {
"id": "proceedings/ifita/2009/3600/3",
"title": "Information Technology and Applications, International Forum on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/1/81831811",
"title": "Joint quantization and error diffusion of color images using competitive learning",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831811/12OmNzTYBTo",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sigra/1997/8102/0/00625178",
"title": "Color image quantization by pairwise clustering",
"doi": null,
"abstractUrl": "/proceedings-article/sigra/1997/00625178/12OmNzYwbWL",
"parentPublication": {
"id": "proceedings/sigra/1997/8102/0",
"title": "Proceedings X Brazilian Symposium on Computer Graphics and Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzuIjed",
"title": "Computer Science and Software Engineering, International Conference on",
"acronym": "csse",
"groupId": "1002553",
"volume": "2",
"displayVolume": "2",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzWfoVh",
"doi": "10.1109/CSSE.2008.281",
"title": "An Improved Median-Cut Algorithm of Color Image Quantization",
"normalizedTitle": "An Improved Median-Cut Algorithm of Color Image Quantization",
"abstract": "The main aim of color quantization is to find the best color palette with the least differences between the original image and the quantized one. The study described in this paper proposes an improved median-cut algorithm which, by improving the pre-quantization precision, calculating the cutting position based on variance and searching reversely the colormap, significantly promotes both the speed and quality of the color quantization. The effectiveness of this algorithm has been proved by experiments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The main aim of color quantization is to find the best color palette with the least differences between the original image and the quantized one. The study described in this paper proposes an improved median-cut algorithm which, by improving the pre-quantization precision, calculating the cutting position based on variance and searching reversely the colormap, significantly promotes both the speed and quality of the color quantization. The effectiveness of this algorithm has been proved by experiments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The main aim of color quantization is to find the best color palette with the least differences between the original image and the quantized one. The study described in this paper proposes an improved median-cut algorithm which, by improving the pre-quantization precision, calculating the cutting position based on variance and searching reversely the colormap, significantly promotes both the speed and quality of the color quantization. The effectiveness of this algorithm has been proved by experiments.",
"fno": "3336c943",
"keywords": [
"Quantization",
"Median Cut",
"Octree",
"Neural Network"
],
"authors": [
{
"affiliation": null,
"fullName": "Chen Wei-dong",
"givenName": "Chen",
"surname": "Wei-dong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ding Wei",
"givenName": "Ding",
"surname": "Wei",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "csse",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "943-946",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3336-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04722204",
"articleId": "12OmNBSBk0Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3336c947",
"articleId": "12OmNrAdsG4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciap/2001/1183/0/11830596",
"title": "Contextual Color Quantization Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iciap/2001/11830596/12OmNAXxWUB",
"parentPublication": {
"id": "proceedings/iciap/2001/1183/0",
"title": "Proceedings ICIAP 2001. 11th International Conference on Image Analysis and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/1/01394268",
"title": "Automatic text detection using multi-layer color quantization in complex color images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394268/12OmNBKW9JO",
"parentPublication": {
"id": "proceedings/icme/2004/8603/1",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/1995/7128/2/71280669",
"title": "Color quantization by RWM-cut",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/1995/71280669/12OmNBkfRn0",
"parentPublication": {
"id": "proceedings/icdar/1995/7128/2",
"title": "Proceedings of 3rd International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2004/2128/1/212810656",
"title": "Fast Color Image Quantization using Squared Euclidean Distance of Adjacent Color Points along the Highest Color Variance Axis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2004/212810656/12OmNrIJqqR",
"parentPublication": {
"id": "proceedings/icpr/2004/2128/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/1/00413283",
"title": "Perceptual optimization of DCT color quantization matrices",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413283/12OmNrkT7My",
"parentPublication": {
"id": "proceedings/icip/1994/6952/3",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2920/0/00201921",
"title": "A color video image quantization method with stable and efficient color selection capability",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00201921/12OmNvF83ne",
"parentPublication": {
"id": "proceedings/icpr/1992/2920/0",
"title": "11th IAPR International Conference on Pattern Recognition. Vol. III. Conference C: Image, Speech and Signal Analysis,",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031f092",
"title": "Color Image Retrieval Based on Vector Quantization",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031f092/12OmNwnYG1K",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvgip/2008/3476/0/3476a017",
"title": "Impulse Noise Removal from Color Images with Hopfield Neural Network and Improved Vector Median Filter",
"doi": null,
"abstractUrl": "/proceedings-article/icvgip/2008/3476a017/12OmNwtWfV5",
"parentPublication": {
"id": "proceedings/icvgip/2008/3476/0",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2016/5698/0/07907437",
"title": "A New Method for Color Quantization",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2016/07907437/12OmNyQGS9t",
"parentPublication": {
"id": "proceedings/sitis/2016/5698/0",
"title": "2016 12th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1995/03/v0274",
"title": "Dynamic Color Quantization of Video Sequences",
"doi": null,
"abstractUrl": "/journal/tg/1995/03/v0274/13rRUxASuSz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1fTgF9x78sw",
"title": "2019 IEEE Visualization Conference (VIS)",
"acronym": "vis",
"groupId": "1001944",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1fTgHHw1pSM",
"doi": "10.1109/VISUAL.2019.8933760",
"title": "Evaluating Gradient Perception in Color-Coded Scalar Fields",
"normalizedTitle": "Evaluating Gradient Perception in Color-Coded Scalar Fields",
"abstract": "Color mapping is a commonly used technique for visualizing scalar fields. While there exists advice for choosing effective colormaps, it is unclear if current guidelines apply equally across task types. We study the perception of gradients and evaluate the effectiveness of three colormaps at depicting gradient magnitudes. In a crowd-sourced experiment, we determine the just-noticeable differences (JNDs) at which participants can reliably compare and judge variations in gradient between two scalar fields. We find that participants exhibited lower JNDs with a diverging (cool-warm) or a spectral (rainbow) scheme, as compared with a monotonic-luminance colormap (viridis). The results support a hypothesis that apparent discontinuities in the color ramp may help viewers discern subtle structural differences in gradient. We discuss these findings and highlight future research directions for colormap evaluation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Color mapping is a commonly used technique for visualizing scalar fields. While there exists advice for choosing effective colormaps, it is unclear if current guidelines apply equally across task types. We study the perception of gradients and evaluate the effectiveness of three colormaps at depicting gradient magnitudes. In a crowd-sourced experiment, we determine the just-noticeable differences (JNDs) at which participants can reliably compare and judge variations in gradient between two scalar fields. We find that participants exhibited lower JNDs with a diverging (cool-warm) or a spectral (rainbow) scheme, as compared with a monotonic-luminance colormap (viridis). The results support a hypothesis that apparent discontinuities in the color ramp may help viewers discern subtle structural differences in gradient. We discuss these findings and highlight future research directions for colormap evaluation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Color mapping is a commonly used technique for visualizing scalar fields. While there exists advice for choosing effective colormaps, it is unclear if current guidelines apply equally across task types. We study the perception of gradients and evaluate the effectiveness of three colormaps at depicting gradient magnitudes. In a crowd-sourced experiment, we determine the just-noticeable differences (JNDs) at which participants can reliably compare and judge variations in gradient between two scalar fields. We find that participants exhibited lower JNDs with a diverging (cool-warm) or a spectral (rainbow) scheme, as compared with a monotonic-luminance colormap (viridis). The results support a hypothesis that apparent discontinuities in the color ramp may help viewers discern subtle structural differences in gradient. We discuss these findings and highlight future research directions for colormap evaluation.",
"fno": "08933760",
"keywords": [
"Data Visualisation",
"Gradient Perception",
"Color Coded Scalar Fields",
"Color Mapping",
"Task Types",
"Gradient Magnitudes",
"Crowd Sourced Experiment",
"Just Noticeable Differences",
"JN Ds",
"Spectral Scheme",
"Monotonic Luminance Colormap",
"Color Ramp",
"Colormap Evaluation",
"Task Analysis",
"Image Color Analysis",
"Visualization",
"Guidelines",
"Color",
"Meteorology",
"Sensitivity",
"Human Centered Computing X 2014 Visualization X 2014",
"Empirical Studies In Visualization"
],
"authors": [
{
"affiliation": "Indiana University–Purdue University Indianapolis",
"fullName": "Khairi Reda",
"givenName": "Khairi",
"surname": "Reda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Argonne National Laboratory & Northern Illinois University",
"fullName": "Michael E. Papka",
"givenName": "Michael E.",
"surname": "Papka",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "271-275",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4941-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08933764",
"articleId": "1fTgICKKw3m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08933671",
"articleId": "1fTgK0Z7gYw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2018/01/08017653",
"title": "The Good, the Bad, and the Ugly: A Theoretical Framework for the Assessment of Continuous Colormaps",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017653/13rRUNvgz9W",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/08/07305807",
"title": "A Survey of Colormaps in Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2016/08/07305807/13rRUwInvBb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/09/08413174",
"title": "Measuring and Modeling the Feature Detection Threshold Functions of Colormaps",
"doi": null,
"abstractUrl": "/journal/tg/2019/09/08413174/13rRUygBw7i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/09/08637778",
"title": "Measuring the Effects of Scalar and Spherical Colormaps on Ensembles of DMRI Tubes",
"doi": null,
"abstractUrl": "/journal/tg/2020/09/08637778/17D45WrVgbO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09919390",
"title": "Rainbow Colormaps: What are they <italic>good</italic> and <italic>bad</italic> for?",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09919390/1HsTAyyKsne",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933764",
"title": "Data-Driven Colormap Optimization for 2D Scalar Field Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933764/1fTgICKKw3m",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/08939459",
"title": "The Making of Continuous Colormaps",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/08939459/1fZRynxLXGM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09216559",
"title": "A Testing Environment for Continuous Colormaps",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09216559/1nJsOQFe8A8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222327",
"title": "Rainbows Revisited: Modeling Effective Colormap Design for Graphical Inference",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222327/1nTqMLwYD0A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09527154",
"title": "Data-Driven Colormap Adjustment for Exploring Spatial Variations in Scalar Fields",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09527154/1wznUQrR6N2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzcxYVJ",
"title": "2017 IEEE/ACIS 16th International Conference on Computer and Information Science (ICIS)",
"acronym": "icis",
"groupId": "1001200",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBV9Ikp",
"doi": "10.1109/ICIS.2017.7960025",
"title": "A flexible finger-mounted airbrush model for immersive freehand painting",
"normalizedTitle": "A flexible finger-mounted airbrush model for immersive freehand painting",
"abstract": "To provide immersive freehand painting experience, we proposed a flexible airbrush model making use of the hands tracking capability of Leap Motion Controller. The airbrush model uses a common screen as the painting canvas. When the user moves hands over the screen, the brush model continually acquires his/her hands movement data and extracts multiple control signals which describes multiple gestures. The virtual airbrush moves along with the user's hands movement as if it is fixed on his/her finger, and its properties change with gestures' change. When the virtual airbrush intersects with the screen, it continually exerts paints onto the screen. User test shows that the user can easily create multifarious brush stroke effects by directly operating over the screen.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To provide immersive freehand painting experience, we proposed a flexible airbrush model making use of the hands tracking capability of Leap Motion Controller. The airbrush model uses a common screen as the painting canvas. When the user moves hands over the screen, the brush model continually acquires his/her hands movement data and extracts multiple control signals which describes multiple gestures. The virtual airbrush moves along with the user's hands movement as if it is fixed on his/her finger, and its properties change with gestures' change. When the virtual airbrush intersects with the screen, it continually exerts paints onto the screen. User test shows that the user can easily create multifarious brush stroke effects by directly operating over the screen.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To provide immersive freehand painting experience, we proposed a flexible airbrush model making use of the hands tracking capability of Leap Motion Controller. The airbrush model uses a common screen as the painting canvas. When the user moves hands over the screen, the brush model continually acquires his/her hands movement data and extracts multiple control signals which describes multiple gestures. The virtual airbrush moves along with the user's hands movement as if it is fixed on his/her finger, and its properties change with gestures' change. When the virtual airbrush intersects with the screen, it continually exerts paints onto the screen. User test shows that the user can easily create multifarious brush stroke effects by directly operating over the screen.",
"fno": "07960025",
"keywords": [
"Brushes",
"Painting",
"Paints",
"Atmospheric Modeling",
"Thumb",
"Tools",
"Aerospace Electronics",
"Brush Modeling",
"Airbrush",
"Paint System",
"Paints Simulation",
"Hands Tracking"
],
"authors": [
{
"affiliation": "Jiangsu Key Laboratory of Media Design and Software Technology, Jiangnan University, Wuxi, China",
"fullName": "Ruimin Lyu",
"givenName": "Ruimin",
"surname": "Lyu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jiangsu Key Laboratory of Media Design and Software Technology, Jiangnan University, Wuxi, China",
"fullName": "Yuefeng Ze",
"givenName": "Yuefeng",
"surname": "Ze",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jiangsu Key Laboratory of Media Design and Software Technology, Jiangnan University, Wuxi, China",
"fullName": "Wei Chen",
"givenName": "Wei",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jiangsu Key Laboratory of Media Design and Software Technology, Jiangnan University, Wuxi, China",
"fullName": "Fei Chen",
"givenName": "Fei",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jiangsu Key Laboratory of Media Design and Software Technology, Jiangnan University, Wuxi, China",
"fullName": "Yuan Liu",
"givenName": "Yuan",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jiangsu Key Laboratory of Media Design and Software Technology, Jiangnan University, Wuxi, China",
"fullName": "Lifang Chen",
"givenName": "Lifang",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Jiangsu Key Laboratory of Media Design and Software Technology, Jiangnan University, Wuxi, China",
"fullName": "Haojie Hao",
"givenName": "Haojie",
"surname": "Hao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icis",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2017-05-01T00:00:00",
"pubType": "proceedings",
"pages": "395-400",
"year": "2017",
"issn": null,
"isbn": "978-1-5090-5507-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07960024",
"articleId": "12OmNx76TFf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07960026",
"articleId": "12OmNzt0IwK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pg/2002/1784/0/17840439",
"title": "On the Effects of Haptic Display in Brush and Ink Simulation for Chinese Painting and Calligraphy",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840439/12OmNApcutB",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480776",
"title": "Cutting, Deforming and Painting of 3D meshes in a Two Handed Viso-haptic VR System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480776/12OmNCwlacX",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2013/3211/0/3211a790",
"title": "A Tracking Method for 2D Canvas in MR-Based Interactive Painting System",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2013/3211a790/12OmNqFrGCH",
"parentPublication": {
"id": "proceedings/sitis/2013/3211/0",
"title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mediacom/2010/4136/0/4136a153",
"title": "A Survey of Rendering of Chinese Painting",
"doi": null,
"abstractUrl": "/proceedings-article/mediacom/2010/4136a153/12OmNrJ11w2",
"parentPublication": {
"id": "proceedings/mediacom/2010/4136/0",
"title": "2010 International Conference on Multimedia Communications (Mediacom 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2009/4830/0/05255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2009/05255155/12OmNyFU7aU",
"parentPublication": {
"id": "proceedings/grc/2009/4830/0",
"title": "2009 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050723",
"title": "Painting with Polygons: A Procedural Watercolor Engine",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050723/13rRUxBa5bY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07042343",
"title": "A Modular Framework for Digital Painting",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07042343/13rRUxDIthe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/09/08419282",
"title": "High Relief from Brush Painting",
"doi": null,
"abstractUrl": "/journal/tg/2019/09/08419282/13rRUxcKzVn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08113507",
"title": "Animated Construction of Chinese Brush Paintings",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08113507/14H4WNjKxTa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798200",
"title": "Panoramic Fluid Painting",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798200/1cJ0VsoPxfO",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx5GU1K",
"title": "2009 IEEE International Conference on Granular Computing",
"acronym": "grc",
"groupId": "1001626",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyFU7aU",
"doi": "10.1109/GRC.2009.5255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"normalizedTitle": "Non-photorealistic rendering of ink painting style diffusion",
"abstract": "By mixing water and ink properly, ink painting obtains different concentrations to show the contrast of an object. It presents an effective rendering algorithm for ink painting that include contour drawing and interior shading to simulate the Chinese drawing. Paint model consists of water and pigments with different physical and optical characteristics. Paper and brush are modeled by multi-layers for realistic painting simulation. It proposed describes hairy brush behavior by the curving spring model on the base of the mechanical model simulation writing brush, and proposed the rice paper model to simulate by the fiber structure by considering rice paper thickness influence to water ink proliferation, In our simulation, water and pigments are not only transferred passively among paper surface, palette, and brush bristle but are also moved actively inside the paper. Experiment result for rendering example demonstrates this system can render images in impressively good non-photorealistic rendering styles.",
"abstracts": [
{
"abstractType": "Regular",
"content": "By mixing water and ink properly, ink painting obtains different concentrations to show the contrast of an object. It presents an effective rendering algorithm for ink painting that include contour drawing and interior shading to simulate the Chinese drawing. Paint model consists of water and pigments with different physical and optical characteristics. Paper and brush are modeled by multi-layers for realistic painting simulation. It proposed describes hairy brush behavior by the curving spring model on the base of the mechanical model simulation writing brush, and proposed the rice paper model to simulate by the fiber structure by considering rice paper thickness influence to water ink proliferation, In our simulation, water and pigments are not only transferred passively among paper surface, palette, and brush bristle but are also moved actively inside the paper. Experiment result for rendering example demonstrates this system can render images in impressively good non-photorealistic rendering styles.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "By mixing water and ink properly, ink painting obtains different concentrations to show the contrast of an object. It presents an effective rendering algorithm for ink painting that include contour drawing and interior shading to simulate the Chinese drawing. Paint model consists of water and pigments with different physical and optical characteristics. Paper and brush are modeled by multi-layers for realistic painting simulation. It proposed describes hairy brush behavior by the curving spring model on the base of the mechanical model simulation writing brush, and proposed the rice paper model to simulate by the fiber structure by considering rice paper thickness influence to water ink proliferation, In our simulation, water and pigments are not only transferred passively among paper surface, palette, and brush bristle but are also moved actively inside the paper. Experiment result for rendering example demonstrates this system can render images in impressively good non-photorealistic rendering styles.",
"fno": "05255155",
"keywords": [
"Art",
"Rendering Computer Graphics",
"Nonphotorealistic Rendering",
"Ink Painting Style Diffusion",
"Contour Drawing",
"Interior Shading",
"Chinese Drawing Simulation",
"Ink",
"Painting",
"Brushes",
"Rendering Computer Graphics",
"Pigments",
"Paints",
"Water",
"Optical Mixing",
"Springs",
"Writing"
],
"authors": [
{
"affiliation": "Institute of Communications and Information Technology, Zhejiang Gongshang University, Hangzhou, China 310018",
"fullName": "Tianding Chen",
"givenName": "Tianding",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "grc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-08-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2009",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05255154",
"articleId": "12OmNCgJedC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05255152",
"articleId": "12OmNCw3z90",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pg/2002/1784/0/17840439",
"title": "On the Effects of Haptic Display in Brush and Ink Simulation for Chinese Painting and Calligraphy",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840439/12OmNApcutB",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1995/7062/0/70620098",
"title": "A diffusion model for computer animation of diffuse ink painting",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1995/70620098/12OmNCbU2S0",
"parentPublication": {
"id": "proceedings/ca/1995/7062/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mediacom/2010/4136/0/4136a153",
"title": "A Survey of Rendering of Chinese Painting",
"doi": null,
"abstractUrl": "/proceedings-article/mediacom/2010/4136a153/12OmNrJ11w2",
"parentPublication": {
"id": "proceedings/mediacom/2010/4136/0",
"title": "2010 International Conference on Multimedia Communications (Mediacom 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fbie/2008/3561/0/3561a090",
"title": "Image Auto-generation of Brush Modeling and Ink Diffusion Chinese Ink Paintin",
"doi": null,
"abstractUrl": "/proceedings-article/fbie/2008/3561a090/12OmNyv7mea",
"parentPublication": {
"id": "proceedings/fbie/2008/3561/0",
"title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2005/2432/0/24320989",
"title": "HUA: An Interactive Calligraphy and Ink-Wash Painting System",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2005/24320989/12OmNywfKzx",
"parentPublication": {
"id": "proceedings/cit/2005/2432/0",
"title": "The Fifth International Conference on Computer and Information Technology CIT 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2017/0831/0/0831a158",
"title": "Real-Time Watercolor Simulation with Fluid Vorticity Within Brush Stroke",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a158/12OmNyxXlwN",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1999/03/mcg1999030074",
"title": "Simulating Oriental Black-Ink Painting",
"doi": null,
"abstractUrl": "/magazine/cg/1999/03/mcg1999030074/13rRUx0gecb",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/i-span/2018/8534/0/853400a193",
"title": "Two-Stage Color ink Painting Style Transfer via Convolution Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/i-span/2018/853400a193/17D45XvMcbn",
"parentPublication": {
"id": "proceedings/i-span/2018/8534/0",
"title": "2018 15th International Symposium on Pervasive Systems, Algorithms and Networks (I-SPAN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798200",
"title": "Panoramic Fluid Painting",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798200/1cJ0VsoPxfO",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a284",
"title": "Real-time Rendering of 3D Animal Models in Chinese Ink Painting Style",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a284/1p1grC3XnGw",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrF2DIa",
"title": "2017 21st International Conference Information Visualisation (IV)",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyxXlwN",
"doi": "10.1109/iV.2017.10",
"title": "Real-Time Watercolor Simulation with Fluid Vorticity Within Brush Stroke",
"normalizedTitle": "Real-Time Watercolor Simulation with Fluid Vorticity Within Brush Stroke",
"abstract": "We investigate the watercolor brush strokes from the perspective of fluid dynamics and the pigments movement within the flowing water. We propose a method to plausibly simulate process of watercolor painting on rough paper including the diffusion effects on graphics processor. We present a method based on 2D fluid simulation using Navier-Stokes equations to simulate the ink diffusion. Intricate details of water flow are preserved by taking into account the vorticity, too. Paper roughness and structure are considered in our approach when evaluating viscous term. Finally, to compose multiple colored semi-transparent layers of brush strokes Kubelka-Munk theory is used.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We investigate the watercolor brush strokes from the perspective of fluid dynamics and the pigments movement within the flowing water. We propose a method to plausibly simulate process of watercolor painting on rough paper including the diffusion effects on graphics processor. We present a method based on 2D fluid simulation using Navier-Stokes equations to simulate the ink diffusion. Intricate details of water flow are preserved by taking into account the vorticity, too. Paper roughness and structure are considered in our approach when evaluating viscous term. Finally, to compose multiple colored semi-transparent layers of brush strokes Kubelka-Munk theory is used.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We investigate the watercolor brush strokes from the perspective of fluid dynamics and the pigments movement within the flowing water. We propose a method to plausibly simulate process of watercolor painting on rough paper including the diffusion effects on graphics processor. We present a method based on 2D fluid simulation using Navier-Stokes equations to simulate the ink diffusion. Intricate details of water flow are preserved by taking into account the vorticity, too. Paper roughness and structure are considered in our approach when evaluating viscous term. Finally, to compose multiple colored semi-transparent layers of brush strokes Kubelka-Munk theory is used.",
"fno": "0831a158",
"keywords": [
"Art",
"Colour Graphics",
"Computational Fluid Dynamics",
"Diffusion",
"Flow Simulation",
"Navier Stokes Equations",
"Painting",
"Rendering Computer Graphics",
"Vortices",
"Fluid Vorticity",
"Watercolor Brush Strokes",
"Fluid Dynamics",
"Pigments Movement",
"Watercolor Painting",
"Rough Paper",
"Graphics Processor",
"Navier Stokes Equations",
"Ink Diffusion",
"Water Flow",
"Process Simulation",
"Kubelka Munk Theory",
"Real Time Watercolor Simulation",
"Diffusion Effects",
"2 D Fluid Simulation",
"Mathematical Model",
"Pigments",
"Computational Modeling",
"Ink",
"Painting",
"Navier Stokes Equations",
"Two Dimensional Displays",
"Ink Simulation",
"Ink Diffusion",
"Pigment Rendering"
],
"authors": [
{
"affiliation": null,
"fullName": "Roman Ďurikovič",
"givenName": "Roman",
"surname": "Ďurikovič",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zuzana Páleníková",
"givenName": "Zuzana",
"surname": "Páleníková",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "158-163",
"year": "2017",
"issn": "2375-0138",
"isbn": "978-1-5386-0831-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0831a152",
"articleId": "12OmNqH9hmE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0831a164",
"articleId": "12OmNxWLTmv",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/mediacom/2010/4136/0/4136a153",
"title": "A Survey of Rendering of Chinese Painting",
"doi": null,
"abstractUrl": "/proceedings-article/mediacom/2010/4136a153/12OmNrJ11w2",
"parentPublication": {
"id": "proceedings/mediacom/2010/4136/0",
"title": "2010 International Conference on Multimedia Communications (Mediacom 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2001/1227/0/12270322",
"title": "Non-Photorealistic Rendering Using Watercolor Inspired Textures and Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2001/12270322/12OmNy4r41q",
"parentPublication": {
"id": "proceedings/pg/2001/1227/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2009/4830/0/05255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2009/05255155/12OmNyFU7aU",
"parentPublication": {
"id": "proceedings/grc/2009/4830/0",
"title": "2009 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fbie/2008/3561/0/3561a090",
        "title": "Image Auto-generation of Brush Modeling and Ink Diffusion Chinese Ink Painting",
"doi": null,
"abstractUrl": "/proceedings-article/fbie/2008/3561a090/12OmNyv7mea",
"parentPublication": {
"id": "proceedings/fbie/2008/3561/0",
"title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2006/2687/0/04019925",
"title": "Modeling Scratchiness Effect of Oriental Writing Brush",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2006/04019925/12OmNzV70Gm",
"parentPublication": {
"id": "proceedings/cit/2006/2687/0",
"title": "The Sixth IEEE International Conference on Computer and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446595",
"title": "Fluid Sketching―Immersive Sketching Based on Fluid Flow",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446595/13bd1eOELL3",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050723",
"title": "Painting with Polygons: A Procedural Watercolor Engine",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050723/13rRUxBa5bY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08113507",
"title": "Animated Construction of Chinese Brush Paintings",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08113507/14H4WNjKxTa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2019/5268/0/526800a465",
"title": "A Watercolor Painting Image Generation Using Stroke-Based Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2019/526800a465/1gysHZ4vu8M",
"parentPublication": {
"id": "proceedings/candarw/2019/5268/0",
"title": "2019 Seventh International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2021/2835/0/283500a145",
"title": "A GPU Implementation of Watercolor Painting Image Generation",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2021/283500a145/1zw5NurwD72",
"parentPublication": {
"id": "proceedings/candarw/2021/2835/0",
"title": "2021 Ninth International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0VsoPxfO",
"doi": "10.1109/VR.2019.8798200",
"title": "Panoramic Fluid Painting",
"normalizedTitle": "Panoramic Fluid Painting",
"abstract": "The dynamic motion of fluids is essential in generating aesthetically appealing effects like the oriental ink painting and fluid art. Due to the prohibitively high cost required in volumetric fluid simulations, implementing an interactive volumetric painting system in immersive virtual environments (IVEs) is challenging. We propose a framework to generate immersive fluid dynamic environments by solving the Navier-Stokes equation on the viewing sphere. With this approach, we largely reduce the complexity without losing the effective resolution. We demonstrate our method on a real-time 360-degree painting system and verify the usability of our interface prototype with examples.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The dynamic motion of fluids is essential in generating aesthetically appealing effects like the oriental ink painting and fluid art. Due to the prohibitively high cost required in volumetric fluid simulations, implementing an interactive volumetric painting system in immersive virtual environments (IVEs) is challenging. We propose a framework to generate immersive fluid dynamic environments by solving the Navier-Stokes equation on the viewing sphere. With this approach, we largely reduce the complexity without losing the effective resolution. We demonstrate our method on a real-time 360-degree painting system and verify the usability of our interface prototype with examples.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The dynamic motion of fluids is essential in generating aesthetically appealing effects like the oriental ink painting and fluid art. Due to the prohibitively high cost required in volumetric fluid simulations, implementing an interactive volumetric painting system in immersive virtual environments (IVEs) is challenging. We propose a framework to generate immersive fluid dynamic environments by solving the Navier-Stokes equation on the viewing sphere. With this approach, we largely reduce the complexity without losing the effective resolution. We demonstrate our method on a real-time 360-degree painting system and verify the usability of our interface prototype with examples.",
"fno": "08798200",
"keywords": [
"Art",
"Computational Fluid Dynamics",
"Interactive Systems",
"Navier Stokes Equations",
"Rendering Computer Graphics",
"Solid Modelling",
"Virtual Reality",
"Real Time 360 Degree Painting System",
"Panoramic Fluid",
"Aesthetically Appealing Effects",
"Oriental Ink Painting",
"Fluid Art",
"Volumetric Fluid Simulations",
"Interactive Volumetric Painting System",
"Immersive Virtual Environments",
"Immersive Fluid Dynamic Environments",
"Navier Stokes Equation",
"Painting",
"Brushes",
"Dynamics",
"Computational Modeling",
"Solid Modeling",
"Paints",
"Ink",
"Fluid Simulation",
"Head Mounted Display",
"Panoramic Image",
"Spherical Coordinate",
"Dynamic Painting",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Virtual Reality",
"Animation",
"Physical Simulation",
"Human Centered Computing",
"Interaction Design",
"Interaction Design Process And Methods",
"User Interface Design"
],
"authors": [
{
"affiliation": "College of Arts and Media, Tongji University, Shanghai, China",
"fullName": "Shengyu Du",
"givenName": "Shengyu",
"surname": "Du",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Arts and Media, Tongji University, Shanghai, China",
"fullName": "Ting Ge",
"givenName": "Ting",
"surname": "Ge",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Arts and Media, Tongji University, Shanghai, China",
"fullName": "Jingyi Pei",
"givenName": "Jingyi",
"surname": "Pei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Arts and Media, Tongji University, Shanghai, China",
"fullName": "Jianmin Wang",
"givenName": "Jianmin",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Arts and Media, Tongji University, Shanghai, China",
"fullName": "Changqing Yin",
"givenName": "Changqing",
"surname": "Yin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Arts and Media, Tongji University, Shanghai, China",
"fullName": "Yongning Zhu",
"givenName": "Yongning",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "904-905",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797924",
"articleId": "1cJ0QNLkDOo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798073",
"articleId": "1cJ1cEQE120",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/case/2012/0430/0/06386490",
"title": "Development of semi-automatic painting system for inner hull block structures",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386490/12OmNBDQbmn",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2017/5507/0/07960025",
"title": "A flexible finger-mounted airbrush model for immersive freehand painting",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2017/07960025/12OmNBV9Ikp",
"parentPublication": {
"id": "proceedings/icis/2017/5507/0",
"title": "2017 IEEE/ACIS 16th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mediacom/2010/4136/0/4136a153",
"title": "A Survey of Rendering of Chinese Painting",
"doi": null,
"abstractUrl": "/proceedings-article/mediacom/2010/4136a153/12OmNrJ11w2",
"parentPublication": {
"id": "proceedings/mediacom/2010/4136/0",
"title": "2010 International Conference on Multimedia Communications (Mediacom 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2009/4830/0/05255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2009/05255155/12OmNyFU7aU",
"parentPublication": {
"id": "proceedings/grc/2009/4830/0",
"title": "2009 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2017/0831/0/0831a158",
"title": "Real-Time Watercolor Simulation with Fluid Vorticity Within Brush Stroke",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a158/12OmNyxXlwN",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2006/2687/0/04019925",
"title": "Modeling Scratchiness Effect of Oriental Writing Brush",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2006/04019925/12OmNzV70Gm",
"parentPublication": {
"id": "proceedings/cit/2006/2687/0",
"title": "The Sixth IEEE International Conference on Computer and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050723",
"title": "Painting with Polygons: A Procedural Watercolor Engine",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050723/13rRUxBa5bY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07042343",
"title": "A Modular Framework for Digital Painting",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07042343/13rRUxDIthe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/07/06766598",
        "title": "WYSIWYG Stereo Painting with Usability Enhancements",
"doi": null,
"abstractUrl": "/journal/tg/2014/07/06766598/13rRUyeCkah",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08113507",
"title": "Animated Construction of Chinese Brush Paintings",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08113507/14H4WNjKxTa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1gysE02WJVK",
"title": "2019 Seventh International Symposium on Computing and Networking Workshops (CANDARW)",
"acronym": "candarw",
"groupId": "1829704",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gysHZ4vu8M",
"doi": "10.1109/CANDARW.2019.00088",
"title": "A Watercolor Painting Image Generation Using Stroke-Based Rendering",
"normalizedTitle": "A Watercolor Painting Image Generation Using Stroke-Based Rendering",
"abstract": "The main contribution of this work is to propose a watercolor painting image generation using stroke-based rendering. In this work, we use a watercolor model simulating behavior of watercolor to paint a brush stroke. Using this model, we mimic uneven painting of brush stroke similar to the actual watercolor painting. Also, we repeat drawing with a brush while reducing the brush size in the same way as real painting. To paint strokes to the canvas, we use a greedy approach such that the difference an input image and the canvas image is minimized. As a result, we can generate high quality watercolor painting images that closely approximate the input image while having a watercolor painting style.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The main contribution of this work is to propose a watercolor painting image generation using stroke-based rendering. In this work, we use a watercolor model simulating behavior of watercolor to paint a brush stroke. Using this model, we mimic uneven painting of brush stroke similar to the actual watercolor painting. Also, we repeat drawing with a brush while reducing the brush size in the same way as real painting. To paint strokes to the canvas, we use a greedy approach such that the difference an input image and the canvas image is minimized. As a result, we can generate high quality watercolor painting images that closely approximate the input image while having a watercolor painting style.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The main contribution of this work is to propose a watercolor painting image generation using stroke-based rendering. In this work, we use a watercolor model simulating behavior of watercolor to paint a brush stroke. Using this model, we mimic uneven painting of brush stroke similar to the actual watercolor painting. Also, we repeat drawing with a brush while reducing the brush size in the same way as real painting. To paint strokes to the canvas, we use a greedy approach such that the difference an input image and the canvas image is minimized. As a result, we can generate high quality watercolor painting images that closely approximate the input image while having a watercolor painting style.",
"fno": "526800a465",
"keywords": [
"Art",
"Image Processing",
"Paints",
"Rendering Computer Graphics",
"Watercolor Painting Image Generation",
"Stroke Based Rendering",
"Brush Stroke",
"Actual Watercolor Painting",
"Canvas Image",
"High Quality Watercolor Painting",
"Watercolor Painting Style",
"Watercolor Painting Image Generation",
"Stroke Based Rendering",
"Watercolor Model"
],
"authors": [
{
"affiliation": "Hiroshima University",
"fullName": "Hisaki Yamane",
"givenName": "Hisaki",
"surname": "Yamane",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hiroshima University",
"fullName": "Yasuaki Ito",
"givenName": "Yasuaki",
"surname": "Ito",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hiroshima University",
"fullName": "Koji Nakano",
"givenName": "Koji",
"surname": "Nakano",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "candarw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-11-01T00:00:00",
"pubType": "proceedings",
"pages": "465-469",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-5268-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "526800a460",
"articleId": "1gysHCe574Q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "526800a470",
"articleId": "1gysGXL6GmA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgi/2004/2171/0/21710640",
"title": "Real-Time Watercolor Painting on a Distributed Paper Model",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2004/21710640/12OmNB8Cj9B",
"parentPublication": {
"id": "proceedings/cgi/2004/2171/0",
"title": "Proceedings. Computer Graphics International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2017/5507/0/07960025",
"title": "A flexible finger-mounted airbrush model for immersive freehand painting",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2017/07960025/12OmNBV9Ikp",
"parentPublication": {
"id": "proceedings/icis/2017/5507/0",
"title": "2017 IEEE/ACIS 16th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2013/3211/0/3211a790",
"title": "A Tracking Method for 2D Canvas in MR-Based Interactive Painting System",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2013/3211a790/12OmNqFrGCH",
"parentPublication": {
"id": "proceedings/sitis/2013/3211/0",
"title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2001/1227/0/12270322",
"title": "Non-Photorealistic Rendering Using Watercolor Inspired Textures and Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2001/12270322/12OmNy4r41q",
"parentPublication": {
"id": "proceedings/pg/2001/1227/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2009/4830/0/05255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2009/05255155/12OmNyFU7aU",
"parentPublication": {
"id": "proceedings/grc/2009/4830/0",
"title": "2009 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2009/3575/0/3575b024",
"title": "A CG Generation Method of Wash Drawing",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2009/3575b024/12OmNym2c6S",
"parentPublication": {
"id": "proceedings/cisis/2009/3575/0",
"title": "2009 International Conference on Complex, Intelligent and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2017/0831/0/0831a158",
"title": "Real-Time Watercolor Simulation with Fluid Vorticity Within Brush Stroke",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a158/12OmNyxXlwN",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2003/04/mcg2003040070",
"title": "A Survey of Stroke-Based Rendering",
"doi": null,
"abstractUrl": "/magazine/cg/2003/04/mcg2003040070/13rRUwfqpIJ",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050723",
"title": "Painting with Polygons: A Procedural Watercolor Engine",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050723/13rRUxBa5bY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2021/2835/0/283500a145",
"title": "A GPU Implementation of Watercolor Painting Image Generation",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2021/283500a145/1zw5NurwD72",
"parentPublication": {
"id": "proceedings/candarw/2021/2835/0",
"title": "2021 Ninth International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3nu7jSK6Q",
"doi": "10.1109/CVPR42600.2020.00846",
"title": "Painting Many Pasts: Synthesizing Time Lapse Videos of Paintings",
"normalizedTitle": "Painting Many Pasts: Synthesizing Time Lapse Videos of Paintings",
"abstract": "We introduce a new video synthesis task: synthesizing time lapse videos depicting how a given painting might have been created. Artists paint using unique combinations of brushes, strokes, and colors. There are often many possible ways to create a given painting. Our goal is to learn to capture this rich range of possibilities. Creating distributions of long-term videos is a challenge for learning-based video synthesis methods. We present a probabilistic model that, given a single image of a completed painting, recurrently synthesizes steps of the painting process. We implement this model as a convolutional neural network, and introduce a novel training scheme to enable learning from a limited dataset of painting time lapses. We demonstrate that this model can be used to sample many time steps, enabling long-term stochastic video synthesis. We evaluate our method on digital and watercolor paintings collected from video websites, and show that human raters find our synthetic videos to be similar to time lapse videos produced by real artists.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce a new video synthesis task: synthesizing time lapse videos depicting how a given painting might have been created. Artists paint using unique combinations of brushes, strokes, and colors. There are often many possible ways to create a given painting. Our goal is to learn to capture this rich range of possibilities. Creating distributions of long-term videos is a challenge for learning-based video synthesis methods. We present a probabilistic model that, given a single image of a completed painting, recurrently synthesizes steps of the painting process. We implement this model as a convolutional neural network, and introduce a novel training scheme to enable learning from a limited dataset of painting time lapses. We demonstrate that this model can be used to sample many time steps, enabling long-term stochastic video synthesis. We evaluate our method on digital and watercolor paintings collected from video websites, and show that human raters find our synthetic videos to be similar to time lapse videos produced by real artists.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce a new video synthesis task: synthesizing time lapse videos depicting how a given painting might have been created. Artists paint using unique combinations of brushes, strokes, and colors. There are often many possible ways to create a given painting. Our goal is to learn to capture this rich range of possibilities. Creating distributions of long-term videos is a challenge for learning-based video synthesis methods. We present a probabilistic model that, given a single image of a completed painting, recurrently synthesizes steps of the painting process. We implement this model as a convolutional neural network, and introduce a novel training scheme to enable learning from a limited dataset of painting time lapses. We demonstrate that this model can be used to sample many time steps, enabling long-term stochastic video synthesis. We evaluate our method on digital and watercolor paintings collected from video websites, and show that human raters find our synthetic videos to be similar to time lapse videos produced by real artists.",
"fno": "716800i432",
"keywords": [
"Art",
"Convolutional Neural Nets",
"Learning Artificial Intelligence",
"Video Signal Processing",
"Web Sites",
"Synthetic Videos",
"Time Lapse Videos",
"Long Term Videos",
"Long Term Stochastic Video Synthesis",
"Digital Watercolor Paintings",
"Learning Based Video Synthesis",
"Video Web Sites",
"Painting",
"Videos",
"Paints",
"Task Analysis",
"Probabilistic Logic",
"Training",
"Lighting"
],
"authors": [
{
"affiliation": "MIT",
"fullName": "Amy Zhao",
"givenName": "Amy",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MIT",
"fullName": "Guha Balakrishnan",
"givenName": "Guha",
"surname": "Balakrishnan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MIT",
"fullName": "Kathleen M. Lewis",
"givenName": "Kathleen M.",
"surname": "Lewis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MIT",
"fullName": "Frédo Durand",
"givenName": "Frédo",
"surname": "Durand",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MIT",
"fullName": "John V. Guttag",
"givenName": "John V.",
"surname": "Guttag",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MIT",
"fullName": "Adrian V. Dalca",
"givenName": "Adrian V.",
"surname": "Dalca",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "8432-8442",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800i422",
"articleId": "1m3nS6bC3lu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800i443",
"articleId": "1m3ni3clQis",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/case/2012/0430/0/06386490",
"title": "Development of semi-automatic painting system for inner hull block structures",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386490/12OmNBDQbmn",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2009/4830/0/05255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2009/05255155/12OmNyFU7aU",
"parentPublication": {
"id": "proceedings/grc/2009/4830/0",
"title": "2009 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2004/05/mcg2004050076",
"title": "Real-Time Painting with an Expressive Virtual Chinese Brush",
"doi": null,
"abstractUrl": "/magazine/cg/2004/05/mcg2004050076/13rRUwfqpG4",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/01/ttg2011010074",
"title": "Video Painting with Space-Time-Varying Style Parameters",
"doi": null,
"abstractUrl": "/journal/tg/2011/01/ttg2011010074/13rRUxAAT0N",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050723",
"title": "Painting with Polygons: A Procedural Watercolor Engine",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050723/13rRUxBa5bY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07042343",
"title": "A Modular Framework for Digital Painting",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07042343/13rRUxDIthe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08113507",
"title": "Animated Construction of Chinese Brush Paintings",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08113507/14H4WNjKxTa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c364",
"title": "Learning to Generate Time-Lapse Videos Using Multi-stage Dynamic Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c364/17D45W9KVJU",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08765801",
"title": "Vectorized Painting with Temporal Diffusion Curves",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08765801/1bLypqX0rwA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798200",
"title": "Panoramic Fluid Painting",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798200/1cJ0VsoPxfO",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1zw5CYExBa8",
"title": "2021 Ninth International Symposium on Computing and Networking Workshops (CANDARW)",
"acronym": "candarw",
"groupId": "1829704",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1zw5NurwD72",
"doi": "10.1109/CANDARW53999.2021.00031",
"title": "A GPU Implementation of Watercolor Painting Image Generation",
"normalizedTitle": "A GPU Implementation of Watercolor Painting Image Generation",
"abstract": "Stroke-based rendering is a technique to generate images with the painting effect of an actual brush stroke by repeating a drawing, called a stroke, many times. In this paper, we propose a watercolor image generation with stroke-based rendering. We use a physical model that simulates a watercolor painting on paper by computing the movement of water and pigment. This method requires a large number of strokes to be drawn one by one, and the computational cost of watercolor simulation for each stroke drawn is also very high. Therefore, to reduce the generation time, we propose a parallel algorithm for drawing multiple strokes simultaneously. The idea of the parallel algorithm is to generate multiple strokes independently. However, since some of the generated strokes have overlaps, such strokes cannot be drawn simultaneously. In our approach, we find non-overlapping strokes by reducing this problem to the independent point set problem and solving it instead. Furthermore, we implement this parallel algorithm on the GPU. Experimental results show that the proposed GPU implementation on NVIDIA GeForce RTX 3090 attains a speed-up factor of up to 75 over a sequential execution on the CPU.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Stroke-based rendering is a technique to generate images with the painting effect of an actual brush stroke by repeating a drawing, called a stroke, many times. In this paper, we propose a watercolor image generation with stroke-based rendering. We use a physical model that simulates a watercolor painting on paper by computing the movement of water and pigment. This method requires a large number of strokes to be drawn one by one, and the computational cost of watercolor simulation for each stroke drawn is also very high. Therefore, to reduce the generation time, we propose a parallel algorithm for drawing multiple strokes simultaneously. The idea of the parallel algorithm is to generate multiple strokes independently. However, since some of the generated strokes have overlaps, such strokes cannot be drawn simultaneously. In our approach, we find non-overlapping strokes by reducing this problem to the independent point set problem and solving it instead. Furthermore, we implement this parallel algorithm on the GPU. Experimental results show that the proposed GPU implementation on NVIDIA GeForce RTX 3090 attains a speed-up factor of up to 75 over a sequential execution on the CPU.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Stroke-based rendering is a technique to generate images with the painting effect of an actual brush stroke by repeating a drawing, called a stroke, many times. In this paper, we propose a watercolor image generation with stroke-based rendering. We use a physical model that simulates a watercolor painting on paper by computing the movement of water and pigment. This method requires a large number of strokes to be drawn one by one, and the computational cost of watercolor simulation for each stroke drawn is also very high. Therefore, to reduce the generation time, we propose a parallel algorithm for drawing multiple strokes simultaneously. The idea of the parallel algorithm is to generate multiple strokes independently. However, since some of the generated strokes have overlaps, such strokes cannot be drawn simultaneously. In our approach, we find non-overlapping strokes by reducing this problem to the independent point set problem and solving it instead. Furthermore, we implement this parallel algorithm on the GPU. Experimental results show that the proposed GPU implementation on NVIDIA GeForce RTX 3090 attains a speed-up factor of up to 75 over a sequential execution on the CPU.",
"fno": "283500a145",
"keywords": [
"Colour Graphics",
"Computer Graphics",
"Graphics Processing Units",
"Image Colour Analysis",
"Image Texture",
"Painting",
"Parallel Algorithms",
"Rendering Computer Graphics",
"Watercolor Image Generation",
"Stroke Based Rendering",
"Watercolor Simulation",
"Generation Time",
"Parallel Algorithm",
"Drawing Multiple Strokes",
"Generated Strokes",
"GPU Implementation",
"Watercolor Painting Image Generation",
"Painting Effect",
"Actual Brush Stroke",
"Greedy Algorithms",
"Image Synthesis",
"Computational Modeling",
"Conferences",
"Graphics Processing Units",
"Pigments",
"Rendering Computer Graphics",
"Stroke Based Rendering",
"Watercolor Simulation",
"GPU",
"Parallel Algorithm"
],
"authors": [
{
"affiliation": "Hiroshima University,Graduate School of Advanced Science and Engineering,Higashi-Hiroshima,Japan,739-8527",
"fullName": "Jiamian Huang",
"givenName": "Jiamian",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hiroshima University,Graduate School of Advanced Science and Engineering,Higashi-Hiroshima,Japan,739-8527",
"fullName": "Yasuaki Ito",
"givenName": "Yasuaki",
"surname": "Ito",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hiroshima University,Graduate School of Advanced Science and Engineering,Higashi-Hiroshima,Japan,739-8527",
"fullName": "Koji Nakano",
"givenName": "Koji",
"surname": "Nakano",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "candarw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-11-01T00:00:00",
"pubType": "proceedings",
"pages": "145-151",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2835-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "283500a138",
"articleId": "1zw5NYUz7Ko",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "283500a152",
"articleId": "1zw5MUOiBKU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2012/4836/0/4836a013",
"title": "An Automatic Rendering Method of Line Strokes for Chinese Landscape Painting",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2012/4836a013/12OmNy3RRF5",
"parentPublication": {
"id": "proceedings/icvrv/2012/4836/0",
"title": "2012 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2001/1227/0/12270322",
"title": "Non-Photorealistic Rendering Using Watercolor Inspired Textures and Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2001/12270322/12OmNy4r41q",
"parentPublication": {
"id": "proceedings/pg/2001/1227/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2017/0831/0/0831a158",
"title": "Real-Time Watercolor Simulation with Fluid Vorticity Within Brush Stroke",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a158/12OmNyxXlwN",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2007/3033/0/30190041",
"title": "Automatic Generation of Traditional Style Painting by Using Density-Based Color Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2007/30190041/12OmNzVGcIp",
"parentPublication": {
"id": "proceedings/icdmw/2007/3033/0",
"title": "Seventh IEEE International Conference on Data Mining Workshops (ICDMW 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050723",
"title": "Painting with Polygons: A Procedural Watercolor Engine",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050723/13rRUxBa5bY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/03/v0266",
"title": "Efficient Example-Based Painting and Synthesis of 2D Directional Texture",
"doi": null,
"abstractUrl": "/journal/tg/2004/03/v0266/13rRUxcbnH1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2022/7532/0/753200a127",
"title": "ConvUNeXt: A Lightweight Convolutional Neural Network for Watercolor Image Translation",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2022/753200a127/1LAz1e7EN8s",
"parentPublication": {
"id": "proceedings/candarw/2022/7532/0",
"title": "2022 Tenth International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08765801",
"title": "Vectorized Painting with Temporal Diffusion Curves",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08765801/1bLypqX0rwA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2019/5268/0/526800a465",
"title": "A Watercolor Painting Image Generation Using Stroke-Based Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2019/526800a465/1gysHZ4vu8M",
"parentPublication": {
"id": "proceedings/candarw/2019/5268/0",
"title": "2019 Seventh International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpbd&is/2020/6512/0/09130594",
"title": "Efficient Image Watercolorization Based on Smart Phones",
"doi": null,
"abstractUrl": "/proceedings-article/hpbd&is/2020/09130594/1l6SQV4kDN6",
"parentPublication": {
"id": "proceedings/hpbd&is/2020/6512/0",
"title": "2020 International Conference on High Performance Big Data and Intelligent Systems (HPBD&IS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNASrawy",
"title": "2006 3rd International Symposium on Voronoi Diagrams in Science and Engineering",
"acronym": "isvd",
"groupId": "1001201",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAIvcYb",
"doi": "10.1109/ISVD.2006.28",
"title": "On the Stretch Factor of the Constrained Delaunay Triangulation",
"normalizedTitle": "On the Stretch Factor of the Constrained Delaunay Triangulation",
"abstract": "Given a set P of n points in the plane and a set S of non-crossing line segments whose endpoints are in P, let CDT(P, S) be the constrained Delaunay triangulation of P with respect to S. Given any two visible points p, q \\in P, we show that there exists a path from p to q in CDT(P, S), denoted SPCDT(p, q), such that every edge in the path has length at most |pq| and the ratio |SP CDT(p, q)|/|pq| is at most \\frac{{4\\pi \\sqrt 3 }}{9}( \\approx 2.42), thereby improving on the previously known bound of \\frac{{\\pi (1 + \\sqrt 5 }}{2}( \\approx 5.08).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Given a set P of n points in the plane and a set S of non-crossing line segments whose endpoints are in P, let CDT(P, S) be the constrained Delaunay triangulation of P with respect to S. Given any two visible points p, q \\in P, we show that there exists a path from p to q in CDT(P, S), denoted SPCDT(p, q), such that every edge in the path has length at most |pq| and the ratio |SP CDT(p, q)|/|pq| is at most \\frac{{4\\pi \\sqrt 3 }}{9}( \\approx 2.42), thereby improving on the previously known bound of \\frac{{\\pi (1 + \\sqrt 5 }}{2}( \\approx 5.08).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Given a set P of n points in the plane and a set S of non-crossing line segments whose endpoints are in P, let CDT(P, S) be the constrained Delaunay triangulation of P with respect to S. Given any two visible points p, q \\in P, we show that there exists a path from p to q in CDT(P, S), denoted SPCDT(p, q), such that every edge in the path has length at most |pq| and the ratio |SP CDT(p, q)|/|pq| is at most \\frac{{4\\pi \\sqrt 3 }}{9}( \\approx 2.42), thereby improving on the previously known bound of \\frac{{\\pi (1 + \\sqrt 5 }}{2}( \\approx 5.08).",
"fno": "26300025",
"keywords": [],
"authors": [
{
"affiliation": "Carleton University, Ontario, Canada",
"fullName": "Prosenjit Bose",
"givenName": "Prosenjit",
"surname": "Bose",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Saskatchewan, Canada",
"fullName": "J. Mark Keil",
"givenName": "J. Mark",
"surname": "Keil",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isvd",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-07-01T00:00:00",
"pubType": "proceedings",
"pages": "25-31",
"year": "2006",
"issn": null,
"isbn": "0-7695-2630-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "26300018",
"articleId": "12OmNs0kyEy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "26300032",
"articleId": "12OmNBvkdmS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icicta/2011/4353/2/05750952",
"title": "Image Completion Using Constrained Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2011/05750952/12OmNBNM8Sj",
"parentPublication": {
"id": "icicta/2011/4353/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/22440406",
"title": "Dynamic Removal Algorithm for Constrained Delaunay Triangulations",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/22440406/12OmNCcKQff",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/1/3571a248",
"title": "An Intelligent Method of Detecting Multi-factors Neighborhood Relation Based On Constrained Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571a248/12OmNvDZF7c",
"parentPublication": {
"id": "gcis/2009/3571/1",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcsw/2001/1080/0/10800390",
"title": "An Efficient Method for Computing the Feasible Region with Translational Containment between Two Convex Polygons",
"doi": null,
"abstractUrl": "/proceedings-article/icdcsw/2001/10800390/12OmNxR5UKl",
"parentPublication": {
"id": "proceedings/icdcsw/2001/1080/0",
"title": "Proceedings 21st International Conference on Distributed Computing Systems Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1988/0878/0/00028293",
"title": "Constrained Delaunay triangulation for multiresolution surface description",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1988/00028293/12OmNxR5USd",
"parentPublication": {
"id": "proceedings/icpr/1988/0878/0",
"title": "9th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/spdp/1993/4222/0/0395482",
"title": "Folded Petersen cube networks: new competitors for the hypercubes",
"doi": null,
"abstractUrl": "/proceedings-article/spdp/1993/0395482/12OmNyz5JZJ",
"parentPublication": {
"id": "proceedings/spdp/1993/4222/0",
"title": "Parallel and Distributed Processing, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/1999/0210/0/02100147",
"title": "The Delaunay Constrained Triangulation: The Delaunay Stable Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100147/12OmNzvQI95",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1986/07/01676813",
"title": "An Efficient Memory System for Image Processing",
"doi": null,
"abstractUrl": "/journal/tc/1986/07/01676813/13rRUwhpBMZ",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050736",
"title": "Computing 2D Constrained Delaunay Triangulation Using the GPU",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050736/13rRUxASuSM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1978/02/01675045",
"title": "Memory Systems for Image Processing",
"doi": null,
"abstractUrl": "/journal/tc/1978/02/01675045/13rRUxYIN2P",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1lgoo5VSJBm",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"acronym": "icicta",
"groupId": "1002487",
"volume": "2",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBNM8Sj",
"doi": "10.1109/ICICTA.2011.427",
"title": "Image Completion Using Constrained Delaunay Triangulation",
"normalizedTitle": "Image Completion Using Constrained Delaunay Triangulation",
"abstract": "In this paper, we proposed a novel algorithm of automatic image structure completion. Different from traditional image completion algorithms directly copying patches from the unknown region to the damaged part, our completion approach first reconstructs the geometry structures in the damaged region with edges inferred by Constrained Delaunay Triangulation (CDT), and select correct edges through a logistic regression classifier. After that, we fill in the region with structure propagation on edges and texture synthesis for other unstructured part respectively. Several experimental results and comparisons demonstrate the effectiveness of our structure completion approach, especially for images with large missing regions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we proposed a novel algorithm of automatic image structure completion. Different from traditional image completion algorithms directly copying patches from the unknown region to the damaged part, our completion approach first reconstructs the geometry structures in the damaged region with edges inferred by Constrained Delaunay Triangulation (CDT), and select correct edges through a logistic regression classifier. After that, we fill in the region with structure propagation on edges and texture synthesis for other unstructured part respectively. Several experimental results and comparisons demonstrate the effectiveness of our structure completion approach, especially for images with large missing regions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we proposed a novel algorithm of automatic image structure completion. Different from traditional image completion algorithms directly copying patches from the unknown region to the damaged part, our completion approach first reconstructs the geometry structures in the damaged region with edges inferred by Constrained Delaunay Triangulation (CDT), and select correct edges through a logistic regression classifier. After that, we fill in the region with structure propagation on edges and texture synthesis for other unstructured part respectively. Several experimental results and comparisons demonstrate the effectiveness of our structure completion approach, especially for images with large missing regions.",
"fno": "05750952",
"keywords": [
"Image Classification",
"Image Reconstruction",
"Image Texture",
"Mesh Generation",
"Regression Analysis",
"Constrained Delaunay Triangulation",
"Automatic Image Structure Completion Algorithm",
"Geometry Structures Reconstruction",
"Logistic Regression Classifier",
"Texture Synthesis",
"Image Edge Detection",
"Image Segmentation",
"Joining Processes",
"Decision Support Systems",
"Logistics",
"Pattern Analysis",
"Image Color Analysis",
"Image Completion",
"Constrained Delaunay Triangulatioin",
"Logistic Regression"
],
"authors": [
{
"affiliation": null,
"fullName": "Han Zhou",
"givenName": "Han",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rong Zhang",
"givenName": "Rong",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Baoyun Wang",
"givenName": "Baoyun",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dong Yin",
"givenName": "Dong",
"surname": "Yin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icicta",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-03-01T00:00:00",
"pubType": "proceedings",
"pages": "568-571",
"year": "2011",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05750951",
"articleId": "12OmNBKEylH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05750953",
"articleId": "12OmNqBtiVl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2007/2794/0/04118740",
"title": "Interactive Image Repair with Assisted Structure and Texture Completion",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2007/04118740/12OmNvjgWvC",
"parentPublication": {
"id": "proceedings/wacv/2007/2794/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761110",
"title": "Image inpainting using wavelet-based inter- and intra-scale dependency",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761110/12OmNwswg6w",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2017/6029/0/6029a328",
"title": "Image Completion Using Sample Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2017/6029a328/12OmNxQOjwX",
"parentPublication": {
"id": "proceedings/aina/2017/6029/0",
"title": "2017 IEEE 31st International Conference on Advanced Information Networking and Applications (AINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2009/3883/0/3883a824",
"title": "Image Completion Based on Weighting Patch Match",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2009/3883a824/12OmNxVV60P",
"parentPublication": {
"id": "proceedings/icig/2009/3883/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/das/2014/3244/0/3244a041",
"title": "Text Detection Using Delaunay Triangulation in Video Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/das/2014/3244a041/12OmNyL0TD0",
"parentPublication": {
"id": "proceedings/das/2014/3244/0",
"title": "2014 11th IAPR International Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eidwt/2013/2141/0/5044a696",
"title": "Exemplar Image Completion Based on Evolutionary Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/eidwt/2013/5044a696/12OmNyrIatO",
"parentPublication": {
"id": "proceedings/eidwt/2013/2141/0",
"title": "2013 Fourth International Conference on Emerging Intelligent Data and Web Technologies (EIDWT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2014/7981/0/7981a509",
"title": "Saliency-Aware Image Completion",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981a509/12OmNz4SOoH",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2012/4896/0/4896a305",
"title": "Image Completion with Automatic Structure Propagation",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2012/4896a305/12OmNzBOimX",
"parentPublication": {
"id": "proceedings/cis/2012/4896/0",
"title": "2012 Eighth International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851a488",
"title": "Multiview Image Completion with Space Structure Propagation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851a488/12OmNzUgdhd",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h947",
"title": "Prior Based Human Completion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h947/1yeJhkxj7K8",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCdk2XT",
"title": "Advances in Parallel and Distributed Computing Conference",
"acronym": "apdc",
"groupId": "1002311",
"volume": "0",
"displayVolume": "0",
"year": "1997",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBTawsE",
"doi": "10.1109/APDC.1997.574023",
"title": "An Improved Parallel Algorithm for Delaunay Triangulation on Distributed Memory Parallel Computers",
"normalizedTitle": "An Improved Parallel Algorithm for Delaunay Triangulation on Distributed Memory Parallel Computers",
"abstract": "Delaunay triangulation has been much used in such applications as volume rendering, shape representation, terrain modeling and so on. The main disadvantage of Delaunay triangulation is large computation time required to obtain the triangulation on an input points set. This time can be reduced by using more than one processor, and several parallel algorithms for Delaunay triangulation have been proposed. In this paper, we propose an improved parallel algorithm for Delaunay triangulation, which partitions the bounding convex region of the input points set into a number of regions by using Delaunay edges and generates Delaunay triangles in each region by applying an incremental construction approach. Partitioning by Delaunay edges makes it possible to eliminate merging step required for integrating subresults. It is shown from the experiments that the proposed algorithm has good load balance and is more efficient than Cignoni et al.'s algorithm (1993) and our previous algorithm (1996).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Delaunay triangulation has been much used in such applications as volume rendering, shape representation, terrain modeling and so on. The main disadvantage of Delaunay triangulation is large computation time required to obtain the triangulation on an input points set. This time can be reduced by using more than one processor, and several parallel algorithms for Delaunay triangulation have been proposed. In this paper, we propose an improved parallel algorithm for Delaunay triangulation, which partitions the bounding convex region of the input points set into a number of regions by using Delaunay edges and generates Delaunay triangles in each region by applying an incremental construction approach. Partitioning by Delaunay edges makes it possible to eliminate merging step required for integrating subresults. It is shown from the experiments that the proposed algorithm has good load balance and is more efficient than Cignoni et al.'s algorithm (1993) and our previous algorithm (1996).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Delaunay triangulation has been much used in such applications as volume rendering, shape representation, terrain modeling and so on. The main disadvantage of Delaunay triangulation is large computation time required to obtain the triangulation on an input points set. This time can be reduced by using more than one processor, and several parallel algorithms for Delaunay triangulation have been proposed. In this paper, we propose an improved parallel algorithm for Delaunay triangulation, which partitions the bounding convex region of the input points set into a number of regions by using Delaunay edges and generates Delaunay triangles in each region by applying an incremental construction approach. Partitioning by Delaunay edges makes it possible to eliminate merging step required for integrating subresults. It is shown from the experiments that the proposed algorithm has good load balance and is more efficient than Cignoni et al.'s algorithm (1993) and our previous algorithm (1996).",
"fno": "78760131",
"keywords": [
"Mesh Generation",
"Delaunay Triangulation",
"Parallel Algorithms",
"Distributed Memory Parallel Computers",
"Computation Time",
"Load Balance"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci. & Eng., POSTECH, Pohang, South Korea",
"fullName": "Sangyoon Lee",
"givenName": "Sangyoon",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci. & Eng., POSTECH, Pohang, South Korea",
"fullName": "Chan-Ik Park",
"givenName": "Chan-Ik",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci. & Eng., POSTECH, Pohang, South Korea",
"fullName": "Chan-Mo Park",
"givenName": "Chan-Mo",
"surname": "Park",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "apdc",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1997-03-01T00:00:00",
"pubType": "proceedings",
"pages": "131",
"year": "1997",
"issn": null,
"isbn": "0-8186-7876-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "78760124",
"articleId": "12OmNBNM8Zb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "78760139",
"articleId": "12OmNzt0IQN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1lgoo57m8rS",
"title": "2009 WRI Global Congress on Intelligent Systems",
"acronym": "gcis",
"groupId": "1002842",
"volume": "1",
"displayVolume": "1",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvDZF7c",
"doi": "10.1109/GCIS.2009.281",
"title": "An Intelligent Method of Detecting Multi-factors Neighborhood Relation Based On Constrained Delaunay Triangulation",
"normalizedTitle": "An Intelligent Method of Detecting Multi-factors Neighborhood Relation Based On Constrained Delaunay Triangulation",
"abstract": "Spatial neighborhood relation detecting is the basis of organization, query, analysis and reasoning of spatial data. For the spatial neighborhood relations of the geographic entities are contained in the Delaunay triangulation, in this paper, the spatial neighborhood relations between multi-factors (including points, lines and polygons) are intelligently detected based on the constrained Delaunay triangulation (CDT). This approach consists of steps listed below: A matched candidate points index is set up in order to reorganize the related data of multi-factors. Then the CDT, which is constrained by edges of lines and polygons, is established with the source of coordinates in point index. After coding the CDT according to a rule this paper proposing, neighborhood relation of geographic entities can be searched automatically. And a global neighborhood relation (including separation and neighbor relation) among multi-factors is automatically established by spatial reasoning. This intelligent method of spatial neighborhood relation detecting, which is no need for manual intervention and not limited between two kinds of geographic entities, has high precision and great feasibility in practice.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Spatial neighborhood relation detecting is the basis of organization, query, analysis and reasoning of spatial data. For the spatial neighborhood relations of the geographic entities are contained in the Delaunay triangulation, in this paper, the spatial neighborhood relations between multi-factors (including points, lines and polygons) are intelligently detected based on the constrained Delaunay triangulation (CDT). This approach consists of steps listed below: A matched candidate points index is set up in order to reorganize the related data of multi-factors. Then the CDT, which is constrained by edges of lines and polygons, is established with the source of coordinates in point index. After coding the CDT according to a rule this paper proposing, neighborhood relation of geographic entities can be searched automatically. And a global neighborhood relation (including separation and neighbor relation) among multi-factors is automatically established by spatial reasoning. This intelligent method of spatial neighborhood relation detecting, which is no need for manual intervention and not limited between two kinds of geographic entities, has high precision and great feasibility in practice.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Spatial neighborhood relation detecting is the basis of organization, query, analysis and reasoning of spatial data. For the spatial neighborhood relations of the geographic entities are contained in the Delaunay triangulation, in this paper, the spatial neighborhood relations between multi-factors (including points, lines and polygons) are intelligently detected based on the constrained Delaunay triangulation (CDT). This approach consists of steps listed below: A matched candidate points index is set up in order to reorganize the related data of multi-factors. Then the CDT, which is constrained by edges of lines and polygons, is established with the source of coordinates in point index. After coding the CDT according to a rule this paper proposing, neighborhood relation of geographic entities can be searched automatically. And a global neighborhood relation (including separation and neighbor relation) among multi-factors is automatically established by spatial reasoning. This intelligent method of spatial neighborhood relation detecting, which is no need for manual intervention and not limited between two kinds of geographic entities, has high precision and great feasibility in practice.",
"fno": "3571a248",
"keywords": [
"Data Analysis",
"Mesh Generation",
"Query Processing",
"Intelligent Method",
"Multifactors Neighborhood Relation",
"Constrained Delaunay Triangulation",
"Data Organization",
"Data Query",
"Data Analysis",
"Data Reasoning",
"Coding",
"Geographic Entities",
"Intelligent Systems",
"Data Structures",
"Geographic Information Systems",
"Computer Aided Instruction",
"Object Detection",
"Educational Programs",
"Databases",
"Network Topology",
"Joining Processes",
"Intelligent Networks",
"Constrained Delaunay Triangulation CDT",
"Neighborhood Relation",
"Multi Factors",
"Spatial Reasoning",
"An Intelligent Method"
],
"authors": [
{
"affiliation": "Sch. of Geographic & Oceanogr. Sci., Nanjing Univ., Nanjing, China",
"fullName": "Wei Wei",
"givenName": "Wei",
"surname": "Wei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Geographic & Oceanogr. Sci., Nanjing Univ., Nanjing, China",
"fullName": "LI Man-chun",
"givenName": "LI",
"surname": "Man-chun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanjing Normal University",
"fullName": "Long Yi",
"givenName": "Long",
"surname": "Yi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Geographic & Oceanogr. Sci., Nanjing Univ., Nanjing, China",
"fullName": "Yong-xue Liu",
"givenName": "Yong-xue",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Geographic & Oceanogr. Sci., Nanjing Univ., Nanjing, China",
"fullName": "Dong Cai",
"givenName": "Dong",
"surname": "Cai",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "gcis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-05-01T00:00:00",
"pubType": "proceedings",
"pages": "248-252",
"year": "2009",
"issn": "2155-6083",
"isbn": "978-0-7695-3571-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3571a168",
"articleId": "12OmNBEYzMn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3571a178",
"articleId": "12OmNzFMFr7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iitsi/2009/3579/0/3579a047",
"title": "Describing and Calculating of Geometry Adjacency Relation with Voronoi Tessellation",
"doi": null,
"abstractUrl": "/proceedings-article/iitsi/2009/3579a047/12OmNBKW9BU",
"parentPublication": {
"id": "proceedings/iitsi/2009/3579/0",
"title": "Intelligent Information Technology and Security Informatics, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2011/4353/2/05750952",
"title": "Image Completion Using Constrained Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2011/05750952/12OmNBNM8Sj",
"parentPublication": {
"id": "icicta/2011/4353/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmlc/2003/7865/3/01259766",
"title": "Spatio-temporal representation for multi-dimensional occlusion relation",
"doi": null,
"abstractUrl": "/proceedings-article/icmlc/2003/01259766/12OmNrIrPqd",
"parentPublication": {
"id": "proceedings/icmlc/2003/7865/3",
"title": "Proceedings of the 2003 International Conference on Machine Learning and Cybernetics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2008/3508/1/3508a102",
"title": "An Algorithm for Spatial Outlier Detection Based on Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2008/3508a102/12OmNxaNGmn",
"parentPublication": {
"id": "proceedings/cis/2008/3508/1",
"title": "2008 International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicic/2008/3161/0/31610115",
"title": "Qualitative Description and Reasoning of Topological Relation in Three-Dimensional GIS",
"doi": null,
"abstractUrl": "/proceedings-article/icicic/2008/31610115/12OmNxuo0go",
"parentPublication": {
"id": "proceedings/icicic/2008/3161/0",
"title": "Innovative Computing ,Information and Control, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/das/2014/3244/0/3244a041",
"title": "Text Detection Using Delaunay Triangulation in Video Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/das/2014/3244a041/12OmNyL0TD0",
"parentPublication": {
"id": "proceedings/das/2014/3244/0",
"title": "2014 11th IAPR International Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2007/08/k1116",
"title": "A Family of Directional Relation Models for Extended Objects",
"doi": null,
"abstractUrl": "/journal/tk/2007/08/k1116/13rRUwInvli",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wiamis/2007/2818/0/04279125",
"title": "Probabilistic Matching Algorithm for Keypoint Based Object Tracking Using a Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/wiamis/2007/04279125/17D45XfSET4",
"parentPublication": {
"id": "proceedings/wiamis/2007/2818/0",
"title": "Image Analysis for Multimedia Interactive Services, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09954199",
"title": "Semi-supervised Entity Alignment via Relation-based Adaptive Neighborhood Matching",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09954199/1InoqSa0QfK",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iat/2022/9402/0/940200a821",
"title": "Weighted Multi-granulation Containment Neighborhood Rough Set Model",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2022/940200a821/1MBECx2Wd8I",
"parentPublication": {
"id": "proceedings/wi-iat/2022/9402/0",
"title": "2022 IEEE/WIC/ACM International Joint Conference on Web Intelligence and Intelligent Agent Technology (WI-IAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqAU6sQ",
"title": "Computer Research and Development, International Conference on",
"acronym": "iccrd",
"groupId": "1800063",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvTk00T",
"doi": "10.1109/ICCRD.2010.19",
"title": "Delaunay Triangulation Based Three Dimensional Anatomical Facial Reconstruction from 2D CT Slices",
"normalizedTitle": "Delaunay Triangulation Based Three Dimensional Anatomical Facial Reconstruction from 2D CT Slices",
"abstract": "A novel approach for 3D anatomical facial reconstruction from 2D CT images using Delaunay Triangulation is proposed in this paper. The method involves individual slice thresholding, contour finding, point cloud generation, Delaunay triangulation and redundant tetrahedron removal. The results are compared with standard marching cube algorithm and with standard commercial 3D software. The performance is studied based on time complexity and improvement in resolution.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A novel approach for 3D anatomical facial reconstruction from 2D CT images using Delaunay Triangulation is proposed in this paper. The method involves individual slice thresholding, contour finding, point cloud generation, Delaunay triangulation and redundant tetrahedron removal. The results are compared with standard marching cube algorithm and with standard commercial 3D software. The performance is studied based on time complexity and improvement in resolution.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A novel approach for 3D anatomical facial reconstruction from 2D CT images using Delaunay Triangulation is proposed in this paper. The method involves individual slice thresholding, contour finding, point cloud generation, Delaunay triangulation and redundant tetrahedron removal. The results are compared with standard marching cube algorithm and with standard commercial 3D software. The performance is studied based on time complexity and improvement in resolution.",
"fno": "4043a326",
"keywords": [
"3 D Reconstruction",
"Delaunay Triangulation",
"Convex Hull",
"Marching Cube",
"Point Cloud"
],
"authors": [
{
"affiliation": null,
"fullName": "R. Menaka",
"givenName": "R.",
"surname": "Menaka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Maya Eapen",
"givenName": "Maya",
"surname": "Eapen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "C. Chellamuthu",
"givenName": "C.",
"surname": "Chellamuthu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccrd",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-05-01T00:00:00",
"pubType": "proceedings",
"pages": "326-330",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4043-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4043a322",
"articleId": "12OmNB8kHN9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4043a331",
"articleId": "12OmNBp52BF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/mass/2012/2433/0/06502544",
"title": "On the spanning ratio of partial Delaunay triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2012/06502544/12OmNBhpS1z",
"parentPublication": {
"id": "proceedings/mass/2012/2433/0",
"title": "2012 IEEE 9th International Conference on Mobile Ad-Hoc and Sensor Systems (MASS 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa/2011/4428/0/4428a052",
"title": "A Parallel 3D Delaunay Triangulation Method",
"doi": null,
"abstractUrl": "/proceedings-article/ispa/2011/4428a052/12OmNrIaehr",
"parentPublication": {
"id": "proceedings/ispa/2011/4428/0",
"title": "International Symposium on Parallel and Distributed Processing with Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa/2010/4190/0/4190a224",
"title": "The Merge Phase of Parallel Divide-and-Conquer Scheme for 3D Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/ispa/2010/4190a224/12OmNwDj12p",
"parentPublication": {
"id": "proceedings/ispa/2010/4190/0",
"title": "International Symposium on Parallel and Distributed Processing with Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciis/1999/0446/0/04460452",
"title": "Fingerprint Identification Using Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/iciis/1999/04460452/12OmNwNwzFJ",
"parentPublication": {
"id": "proceedings/iciis/1999/0446/0",
"title": "Information, Intelligence, and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2011/4483/0/4483a160",
"title": "Global Surface Remeshing Using Symmetric Delaunay Triangulation in Uniformization Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2011/4483a160/12OmNwudQMl",
"parentPublication": {
"id": "proceedings/isvd/2011/4483/0",
"title": "2011 Eighth International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2011/4483/0/4483a039",
"title": "Isotropic Mesh Simplification by Evolving the Geodesic Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2011/4483a039/12OmNwwd2VI",
"parentPublication": {
"id": "proceedings/isvd/2011/4483/0",
"title": "2011 Eighth International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icetet/2008/3267/0/3267a282",
"title": "Local Delaunay Triangulation for Mobile Nodes",
"doi": null,
"abstractUrl": "/proceedings-article/icetet/2008/3267a282/12OmNxwWoUv",
"parentPublication": {
"id": "proceedings/icetet/2008/3267/0",
"title": "Emerging Trends in Engineering & Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2010/3987/1/3987a447",
"title": "Algorithm of the Delaunay Triangulation Net Interpolated Feature Points for Borehole Data",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2010/3987a447/12OmNy6Zs45",
"parentPublication": {
"id": "proceedings/etcs/2010/3987/1",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/1999/0210/0/02100147",
"title": "The Delaunay Constrained Triangulation: The Delaunay Stable Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100147/12OmNzvQI95",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1995/05/mcg1995050062",
"title": "Delaunay Triangulation in Three Dimensions",
"doi": null,
"abstractUrl": "/magazine/cg/1995/05/mcg1995050062/13rRUwInvMz",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzlD94K",
"title": "International Symposium on Parallel and Distributed Processing with Applications",
"acronym": "ispa",
"groupId": "1002557",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwDj12p",
"doi": "10.1109/ISPA.2010.71",
"title": "The Merge Phase of Parallel Divide-and-Conquer Scheme for 3D Delaunay Triangulation",
"normalizedTitle": "The Merge Phase of Parallel Divide-and-Conquer Scheme for 3D Delaunay Triangulation",
"abstract": "In parallel generation of 3D Delaunay triangulation, the merge phase is the main part that affects the parallel efficiency. In this work, the geometric properties of the merging triangulation between two Delaunay triangulations were identified. Several Delaunay triangulations of random point sets were used to generate merging triangulation two by two. From the experimental results, the generated interface triangulation are all point free which satisfy the criterion of Delaunay triangulation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In parallel generation of 3D Delaunay triangulation, the merge phase is the main part that affects the parallel efficiency. In this work, the geometric properties of the merging triangulation between two Delaunay triangulations were identified. Several Delaunay triangulations of random point sets were used to generate merging triangulation two by two. From the experimental results, the generated interface triangulation are all point free which satisfy the criterion of Delaunay triangulation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In parallel generation of 3D Delaunay triangulation, the merge phase is the main part that affects the parallel efficiency. In this work, the geometric properties of the merging triangulation between two Delaunay triangulations were identified. Several Delaunay triangulations of random point sets were used to generate merging triangulation two by two. From the experimental results, the generated interface triangulation are all point free which satisfy the criterion of Delaunay triangulation.",
"fno": "4190a224",
"keywords": [
"Delaunay Triangulation",
"Tetrahedralization"
],
"authors": [
{
"affiliation": null,
"fullName": "Min-Bin Chen",
"givenName": "Min-Bin",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ispa",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-09-01T00:00:00",
"pubType": "proceedings",
"pages": "224-230",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4190-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4190a220",
"articleId": "12OmNwwuDQ3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4190a231",
"articleId": "12OmNzIUfNr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/apdc/1997/7876/0/78760131",
"title": "An Improved Parallel Algorithm for Delaunay Triangulation on Distributed Memory Parallel Computers",
"doi": null,
"abstractUrl": "/proceedings-article/apdc/1997/78760131/12OmNBTawsE",
"parentPublication": {
"id": "proceedings/apdc/1997/7876/0",
"title": "Advances in Parallel and Distributed Computing Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa/2011/4428/0/4428a052",
"title": "A Parallel 3D Delaunay Triangulation Method",
"doi": null,
"abstractUrl": "/proceedings-article/ispa/2011/4428a052/12OmNrIaehr",
"parentPublication": {
"id": "proceedings/ispa/2011/4428/0",
"title": "International Symposium on Parallel and Distributed Processing with Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciis/1999/0446/0/04460452",
"title": "Fingerprint Identification Using Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/iciis/1999/04460452/12OmNwNwzFJ",
"parentPublication": {
"id": "proceedings/iciis/1999/0446/0",
"title": "Information, Intelligence, and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2011/4483/0/4483a160",
"title": "Global Surface Remeshing Using Symmetric Delaunay Triangulation in Uniformization Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2011/4483a160/12OmNwudQMl",
"parentPublication": {
"id": "proceedings/isvd/2011/4483/0",
"title": "2011 Eighth International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2011/4483/0/4483a039",
"title": "Isotropic Mesh Simplification by Evolving the Geodesic Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2011/4483a039/12OmNwwd2VI",
"parentPublication": {
"id": "proceedings/isvd/2011/4483/0",
"title": "2011 Eighth International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icetet/2008/3267/0/3267a282",
"title": "Local Delaunay Triangulation for Mobile Nodes",
"doi": null,
"abstractUrl": "/proceedings-article/icetet/2008/3267a282/12OmNxwWoUv",
"parentPublication": {
"id": "proceedings/icetet/2008/3267/0",
"title": "Emerging Trends in Engineering & Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2010/3987/1/3987a447",
"title": "Algorithm of the Delaunay Triangulation Net Interpolated Feature Points for Borehole Data",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2010/3987a447/12OmNy6Zs45",
"parentPublication": {
"id": "proceedings/etcs/2010/3987/1",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2002/1760/0/17600571",
"title": "A Parallel Divide-and-Conquer Scheme for Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2002/17600571/12OmNyxFKdi",
"parentPublication": {
"id": "proceedings/icpads/2002/1760/0",
"title": "Proceedings of the Ninth International Conference on Parallel and Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/1999/0210/0/02100147",
"title": "The Delaunay Constrained Triangulation: The Delaunay Stable Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100147/12OmNzvQI95",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1995/05/mcg1995050062",
"title": "Delaunay Triangulation in Three Dimensions",
"doi": null,
"abstractUrl": "/magazine/cg/1995/05/mcg1995050062/13rRUwInvMz",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxisQYU",
"title": "2012 Ninth International Symposium on Voronoi Diagrams in Science and Engineering (ISVD 2012)",
"acronym": "isvd",
"groupId": "1001201",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwoxSe8",
"doi": "10.1109/ISVD.2012.9",
"title": "Localizing the Delaunay Triangulation and its Parallel Implementation",
"normalizedTitle": "Localizing the Delaunay Triangulation and its Parallel Implementation",
"abstract": "We show how to localize the Delaunay triangulation of a given planar point set, namely, bound the set of points which are possible Delaunay neighbors of a given point. We then exploit this observation in an algorithm for constructing the Delaunay triangulation (and its dual Voronoi diagram) by computing the Delaunay neighbors (and Voronoi cell) of each point independently. While this does not lead to the fastest serial algorithm possible for Delaunay triangulation, it does lead to an efficient parallelization strategy which achieves almost perfect speedups on multicore machines.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We show how to localize the Delaunay triangulation of a given planar point set, namely, bound the set of points which are possible Delaunay neighbors of a given point. We then exploit this observation in an algorithm for constructing the Delaunay triangulation (and its dual Voronoi diagram) by computing the Delaunay neighbors (and Voronoi cell) of each point independently. While this does not lead to the fastest serial algorithm possible for Delaunay triangulation, it does lead to an efficient parallelization strategy which achieves almost perfect speedups on multicore machines.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We show how to localize the Delaunay triangulation of a given planar point set, namely, bound the set of points which are possible Delaunay neighbors of a given point. We then exploit this observation in an algorithm for constructing the Delaunay triangulation (and its dual Voronoi diagram) by computing the Delaunay neighbors (and Voronoi cell) of each point independently. While this does not lead to the fastest serial algorithm possible for Delaunay triangulation, it does lead to an efficient parallelization strategy which achieves almost perfect speedups on multicore machines.",
"fno": "06257653",
"keywords": [
"Algorithm Design And Analysis",
"Complexity Theory",
"Graphics Processing Unit",
"Partitioning Algorithms",
"Data Structures",
"Spirals",
"Parallel Algorithms",
"Delaunay Triangulation",
"Voronoi Diagram",
"Parallel Computation"
],
"authors": [
{
"affiliation": null,
"fullName": "Renjie Chen",
"givenName": "Renjie",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
                    "fullName": "Craig Gotsman",
                    "givenName": "Craig",
"surname": "Gotsman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isvd",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "24-31",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-1910-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06257652",
"articleId": "12OmNya72ql",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06257654",
"articleId": "12OmNyUnEF8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isvd/2006/2630/0/26300002",
"title": "Voronoi Diagram and Delaunay Triangulation: Applications and Challenges in Bioinformatics",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2006/26300002/12OmNBLdKPK",
"parentPublication": {
"id": "proceedings/isvd/2006/2630/0",
"title": "2006 3rd International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa/2011/4428/0/4428a052",
"title": "A Parallel 3D Delaunay Triangulation Method",
"doi": null,
"abstractUrl": "/proceedings-article/ispa/2011/4428a052/12OmNrIaehr",
"parentPublication": {
"id": "proceedings/ispa/2011/4428/0",
"title": "International Symposium on Parallel and Distributed Processing with Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispdc/2016/4152/0/07904277",
"title": "Distributed and Parallel Delaunay Triangulation Construction with Balanced Binary-tree Model in Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/ispdc/2016/07904277/12OmNrnJ6MR",
"parentPublication": {
"id": "proceedings/ispdc/2016/4152/0",
"title": "2016 15th International Symposium on Parallel and Distributed Computing (ISPDC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a494",
"title": "Delaunay/Voronoi Dual Representation of Smooth 2-Manifolds",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a494/12OmNs5rkQF",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa/2010/4190/0/4190a224",
"title": "The Merge Phase of Parallel Divide-and-Conquer Scheme for 3D Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/ispa/2010/4190a224/12OmNwDj12p",
"parentPublication": {
"id": "proceedings/ispa/2010/4190/0",
"title": "International Symposium on Parallel and Distributed Processing with Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ised/2011/4570/0/4570a341",
"title": "A Novel Fuzzy-GIS Model Based on Delaunay Triangulation to Forecast Facility Locations (FGISFFL)",
"doi": null,
"abstractUrl": "/proceedings-article/ised/2011/4570a341/12OmNy2rRVS",
"parentPublication": {
"id": "proceedings/ised/2011/4570/0",
"title": "2011 International Symposium on Electronic System Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipps/1994/5602/0/0288190",
"title": "An optimal mesh computer algorithm for constrained Delaunay triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/ipps/1994/0288190/12OmNyRxFrc",
"parentPublication": {
"id": "proceedings/ipps/1994/5602/0",
"title": "Parallel Processing Symposium, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcse/2010/4303/1/4303a187",
"title": "Fast Delaunay Triangulation and Voronoi Diagram Generation on the Sphere",
"doi": null,
"abstractUrl": "/proceedings-article/wcse/2010/4303a187/12OmNzDehe0",
"parentPublication": {
"id": "wcse/2010/4303/1",
"title": "2010 Second World Congress on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/1999/0210/0/02100147",
"title": "The Delaunay Constrained Triangulation: The Delaunay Stable Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100147/12OmNzvQI95",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2019/0858/0/09006534",
"title": "Tile & Merge: Distributed Delaunay Triangulations for Cloud Computing",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2019/09006534/1hJs2xqQUEg",
"parentPublication": {
"id": "proceedings/big-data/2019/0858/0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwekjuM",
"title": "9th International Conference on Pattern Recognition",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "1988",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxR5USd",
"doi": "10.1109/ICPR.1988.28293",
"title": "Constrained Delaunay triangulation for multiresolution surface description",
"normalizedTitle": "Constrained Delaunay triangulation for multiresolution surface description",
"abstract": "The problem of building a constrained Delaunay triangulation (CDT) at different levels of resolution is considered for the hierarchical description of topographic surfaces. The surface is approximated at each level by a network of planar triangular faces having vertices at a subset of surface-specific points, such as peaks, pits, or passes, and including edges that describe surface-specific lines, such as ridges or valleys. Each approximation is built based on a Delaunay triangulation of the data points that includes the given constraint segments. A dynamic algorithm for constrained Delaunay triangulation is proposed. The algorithm is based on the stepwise refinement of a CDT by the incremental insertion of points and constraint segments.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The problem of building a constrained Delaunay triangulation (CDT) at different levels of resolution is considered for the hierarchical description of topographic surfaces. The surface is approximated at each level by a network of planar triangular faces having vertices at a subset of surface-specific points, such as peaks, pits, or passes, and including edges that describe surface-specific lines, such as ridges or valleys. Each approximation is built based on a Delaunay triangulation of the data points that includes the given constraint segments. A dynamic algorithm for constrained Delaunay triangulation is proposed. The algorithm is based on the stepwise refinement of a CDT by the incremental insertion of points and constraint segments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The problem of building a constrained Delaunay triangulation (CDT) at different levels of resolution is considered for the hierarchical description of topographic surfaces. The surface is approximated at each level by a network of planar triangular faces having vertices at a subset of surface-specific points, such as peaks, pits, or passes, and including edges that describe surface-specific lines, such as ridges or valleys. Each approximation is built based on a Delaunay triangulation of the data points that includes the given constraint segments. A dynamic algorithm for constrained Delaunay triangulation is proposed. The algorithm is based on the stepwise refinement of a CDT by the incremental insertion of points and constraint segments.",
"fno": "00028293",
"keywords": [
"Pattern Recognition",
"Picture Processing",
"Pattern Recognition",
"Picture Processing",
"Multiresolution Surface Description",
"Constrained Delaunay Triangulation",
"Topographic Surfaces",
"Surface Topography",
"Piecewise Linear Approximation",
"Surface Reconstruction",
"Heuristic Algorithms",
"Computer Vision",
"Shape Measurement",
"Data Processing",
"Computer Graphics",
"Application Software",
"Embedded Computing"
],
"authors": [
{
"affiliation": "Istituto per la Matematica Applicata, Genova, Italy",
"fullName": "L. De Floriani",
"givenName": "L.",
"surname": "De Floriani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Istituto per la Matematica Applicata, Genova, Italy",
"fullName": "E. Puppo",
"givenName": "E.",
"surname": "Puppo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1988-01-01T00:00:00",
"pubType": "proceedings",
"pages": "566,567,568,569",
"year": "1988",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00028292",
"articleId": "12OmNzUxOjG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00028294",
"articleId": "12OmNBKEyv8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isvd/2006/2630/0/26300025",
"title": "On the Stretch Factor of the Constrained Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2006/26300025/12OmNAIvcYb",
"parentPublication": {
"id": "proceedings/isvd/2006/2630/0",
"title": "2006 3rd International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2011/4353/2/05750952",
"title": "Image Completion Using Constrained Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2011/05750952/12OmNBNM8Sj",
"parentPublication": {
"id": "icicta/2011/4353/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/1/3571a248",
"title": "An Intelligent Method of Detecting Multi-factors Neighborhood Relation Based On Constrained Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571a248/12OmNvDZF7c",
"parentPublication": {
"id": "gcis/2009/3571/1",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2012/1910/0/06257653",
"title": "Localizing the Delaunay Triangulation and its Parallel Implementation",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2012/06257653/12OmNwoxSe8",
"parentPublication": {
"id": "proceedings/isvd/2012/1910/0",
"title": "2012 Ninth International Symposium on Voronoi Diagrams in Science and Engineering (ISVD 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2011/4483/0/4483a160",
"title": "Global Surface Remeshing Using Symmetric Delaunay Triangulation in Uniformization Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2011/4483a160/12OmNwudQMl",
"parentPublication": {
"id": "proceedings/isvd/2011/4483/0",
"title": "2011 Eighth International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460309",
"title": "Genus refinement of a manifold surface reconstructed by sculpting the 3d-Delaunay triangulation of Structure-from-Motion points",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460309/12OmNypIYwx",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/1999/0210/0/02100147",
"title": "The Delaunay Constrained Triangulation: The Delaunay Stable Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100147/12OmNzvQI95",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1995/05/mcg1995050062",
"title": "Delaunay Triangulation in Three Dimensions",
"doi": null,
"abstractUrl": "/magazine/cg/1995/05/mcg1995050062/13rRUwInvMz",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050736",
"title": "Computing 2D Constrained Delaunay Triangulation Using the GPU",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050736/13rRUxASuSM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbk/2019/4607/0/460700a167",
"title": "Nonparametric Functional Approximation with Delaunay Triangulation Learner",
"doi": null,
"abstractUrl": "/proceedings-article/icbk/2019/460700a167/1grN67I14JO",
"parentPublication": {
"id": "proceedings/icbk/2019/4607/0",
"title": "2019 IEEE International Conference on Big Knowledge (ICBK)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx8wTf5",
"title": "Emerging Trends in Engineering & Technology, International Conference on",
"acronym": "icetet",
"groupId": "1002112",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxwWoUv",
"doi": "10.1109/ICETET.2008.253",
"title": "Local Delaunay Triangulation for Mobile Nodes",
"normalizedTitle": "Local Delaunay Triangulation for Mobile Nodes",
"abstract": "Mobility is one of the challenging tasks in the routing of wireless ad hoc networks. The spanner local Delaunay triangulation (LDel) may not preserve its geometric properties if the nodes in the network move around which leads to the degradation of network performance. In this paper, we propose a spanner called mobile local Delaunay triangulation (MLDel) which performs better even at high mobility conditions and also maintains Delaunay properties. To evaluate the performance of MLDel, the simulation is performed using network simulator (ns-2.28). The simulation results show that MLDel gives better performance than LDel.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mobility is one of the challenging tasks in the routing of wireless ad hoc networks. The spanner local Delaunay triangulation (LDel) may not preserve its geometric properties if the nodes in the network move around which leads to the degradation of network performance. In this paper, we propose a spanner called mobile local Delaunay triangulation (MLDel) which performs better even at high mobility conditions and also maintains Delaunay properties. To evaluate the performance of MLDel, the simulation is performed using network simulator (ns-2.28). The simulation results show that MLDel gives better performance than LDel.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mobility is one of the challenging tasks in the routing of wireless ad hoc networks. The spanner local Delaunay triangulation (LDel) may not preserve its geometric properties if the nodes in the network move around which leads to the degradation of network performance. In this paper, we propose a spanner called mobile local Delaunay triangulation (MLDel) which performs better even at high mobility conditions and also maintains Delaunay properties. To evaluate the performance of MLDel, the simulation is performed using network simulator (ns-2.28). The simulation results show that MLDel gives better performance than LDel.",
"fno": "3267a282",
"keywords": [
"Ad Hoc Network",
"Geometric Spanner",
"Local Delaunay Triangulation",
"Mobility"
],
"authors": [
{
"affiliation": null,
"fullName": "D. Satyanarayana",
"givenName": "D.",
"surname": "Satyanarayana",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "S.V. Rao",
"givenName": "S.V.",
"surname": "Rao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icetet",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-07-01T00:00:00",
"pubType": "proceedings",
"pages": "282-287",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3267-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3267a276",
"articleId": "12OmNxy4N3k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3267a373",
"articleId": "12OmNqH9hqV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/mass/2012/2433/0/06502544",
"title": "On the spanning ratio of partial Delaunay triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2012/06502544/12OmNBhpS1z",
"parentPublication": {
"id": "proceedings/mass/2012/2433/0",
"title": "2012 IEEE 9th International Conference on Mobile Ad-Hoc and Sensor Systems (MASS 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa/2011/4428/0/4428a052",
"title": "A Parallel 3D Delaunay Triangulation Method",
"doi": null,
"abstractUrl": "/proceedings-article/ispa/2011/4428a052/12OmNrIaehr",
"parentPublication": {
"id": "proceedings/ispa/2011/4428/0",
"title": "International Symposium on Parallel and Distributed Processing with Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccrd/2010/4043/0/4043a326",
"title": "Delaunay Triangulation Based Three Dimensional Anatomical Facial Reconstruction from 2D CT Slices",
"doi": null,
"abstractUrl": "/proceedings-article/iccrd/2010/4043a326/12OmNvTk00T",
"parentPublication": {
"id": "proceedings/iccrd/2010/4043/0",
"title": "Computer Research and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa/2010/4190/0/4190a224",
"title": "The Merge Phase of Parallel Divide-and-Conquer Scheme for 3D Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/ispa/2010/4190a224/12OmNwDj12p",
"parentPublication": {
"id": "proceedings/ispa/2010/4190/0",
"title": "International Symposium on Parallel and Distributed Processing with Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciis/1999/0446/0/04460452",
"title": "Fingerprint Identification Using Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/iciis/1999/04460452/12OmNwNwzFJ",
"parentPublication": {
"id": "proceedings/iciis/1999/0446/0",
"title": "Information, Intelligence, and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2011/4483/0/4483a160",
"title": "Global Surface Remeshing Using Symmetric Delaunay Triangulation in Uniformization Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2011/4483a160/12OmNwudQMl",
"parentPublication": {
"id": "proceedings/isvd/2011/4483/0",
"title": "2011 Eighth International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2011/4483/0/4483a039",
"title": "Isotropic Mesh Simplification by Evolving the Geodesic Delaunay Triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2011/4483a039/12OmNwwd2VI",
"parentPublication": {
"id": "proceedings/isvd/2011/4483/0",
"title": "2011 Eighth International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2010/3987/1/3987a447",
"title": "Algorithm of the Delaunay Triangulation Net Interpolated Feature Points for Borehole Data",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2010/3987a447/12OmNy6Zs45",
"parentPublication": {
"id": "proceedings/etcs/2010/3987/1",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/1999/0210/0/02100147",
"title": "The Delaunay Constrained Triangulation: The Delaunay Stable Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100147/12OmNzvQI95",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1995/05/mcg1995050062",
"title": "Delaunay Triangulation in Three Dimensions",
"doi": null,
"abstractUrl": "/magazine/cg/1995/05/mcg1995050062/13rRUwInvMz",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1eLxZSVsOLC",
"title": "2019 28th International Conference on Parallel Architectures and Compilation Techniques (PACT)",
"acronym": "pact",
"groupId": "1000535",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1eLy400Tb2g",
"doi": "10.1109/PACT.2019.00039",
"title": "Computing Three-Dimensional Constrained Delaunay Refinement Using the GPU",
"normalizedTitle": "Computing Three-Dimensional Constrained Delaunay Refinement Using the GPU",
"abstract": "We propose the first GPU algorithm for the 3D constrained Delaunay refinement problem. For an input of a piecewise linear complex G and a constant B, it produces, by adding Steiner points, a constrained Delaunay triangulation conforming to G and containing tetrahedra mostly with radius-edge ratios smaller than B. Our implementation of the algorithm shows that it can be an order of magnitude faster than the best CPU software while using similar quantities of Steiner points to produce triangulations of comparable qualities. It thus reduces the computing time of triangulation refinement from possibly an hour to a few seconds or minutes for possible use in interactive applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose the first GPU algorithm for the 3D constrained Delaunay refinement problem. For an input of a piecewise linear complex G and a constant B, it produces, by adding Steiner points, a constrained Delaunay triangulation conforming to G and containing tetrahedra mostly with radius-edge ratios smaller than B. Our implementation of the algorithm shows that it can be an order of magnitude faster than the best CPU software while using similar quantities of Steiner points to produce triangulations of comparable qualities. It thus reduces the computing time of triangulation refinement from possibly an hour to a few seconds or minutes for possible use in interactive applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose the first GPU algorithm for the 3D constrained Delaunay refinement problem. For an input of a piecewise linear complex G and a constant B, it produces, by adding Steiner points, a constrained Delaunay triangulation conforming to G and containing tetrahedra mostly with radius-edge ratios smaller than B. Our implementation of the algorithm shows that it can be an order of magnitude faster than the best CPU software while using similar quantities of Steiner points to produce triangulations of comparable qualities. It thus reduces the computing time of triangulation refinement from possibly an hour to a few seconds or minutes for possible use in interactive applications.",
"fno": "361300a409",
"keywords": [
"Computational Geometry",
"Mesh Generation",
"Three Dimensional Constrained Delaunay Refinement",
"GPU Algorithm",
"3 D Constrained Delaunay Refinement Problem",
"Steiner Points",
"Delaunay Triangulation",
"Graphics Processing Units",
"Steiner Trees",
"Instruction Sets",
"Multicore Processing",
"Software Algorithms",
"Data Structures",
"GPGPU",
"Computational Geometry",
"Mesh Refinement",
"Finite Element Analysis"
],
"authors": [
{
"affiliation": "National University of Singapore",
"fullName": "Zhenghai Chen",
"givenName": "Zhenghai",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National University of Singapore",
"fullName": "Tiow-Seng Tan",
"givenName": "Tiow-Seng",
"surname": "Tan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pact",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-09-01T00:00:00",
"pubType": "proceedings",
"pages": "409-420",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3613-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "361300a395",
"articleId": "1eLy294LuyA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "361300a421",
"articleId": "1eLy3GzmLCg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/itng/2014/3187/0/06822261",
"title": "Stability Aware Delaunay Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2014/06822261/12OmNBqMDfK",
"parentPublication": {
"id": "proceedings/itng/2014/3187/0",
"title": "2014 Eleventh International Conference on Information Technology: New Generations (ITNG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2012/1910/0/06257653",
"title": "Localizing the Delaunay Triangulation and its Parallel Implementation",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2012/06257653/12OmNwoxSe8",
"parentPublication": {
"id": "proceedings/isvd/2012/1910/0",
"title": "2012 Ninth International Symposium on Voronoi Diagrams in Science and Engineering (ISVD 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1988/0878/0/00028293",
"title": "Constrained Delaunay triangulation for multiresolution surface description",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1988/00028293/12OmNxR5USd",
"parentPublication": {
"id": "proceedings/icpr/1988/0878/0",
"title": "9th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispan-fcst-iscc/2017/0840/0/0840a520",
"title": "More on the Inverse Problem of Steiner Wiener Index",
"doi": null,
"abstractUrl": "/proceedings-article/ispan-fcst-iscc/2017/0840a520/12OmNy2Jt7E",
"parentPublication": {
"id": "proceedings/ispan-fcst-iscc/2017/0840/0",
"title": "2017 14th International Symposium on Pervasive Systems, Algorithms and Networks & 2017 11th International Conference on Frontier of Computer Science and Technology & 2017 Third International Symposium of Creative Computing (ISPAN-FCST-ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipps/1994/5602/0/0288190",
"title": "An optimal mesh computer algorithm for constrained Delaunay triangulation",
"doi": null,
"abstractUrl": "/proceedings-article/ipps/1994/0288190/12OmNyRxFrc",
"parentPublication": {
"id": "proceedings/ipps/1994/5602/0",
"title": "Parallel Processing Symposium, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460309",
"title": "Genus refinement of a manifold surface reconstructed by sculpting the 3d-Delaunay triangulation of Structure-from-Motion points",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460309/12OmNypIYwx",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/1999/0210/0/02100147",
"title": "The Delaunay Constrained Triangulation: The Delaunay Stable Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100147/12OmNzvQI95",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1995/05/mcg1995050062",
"title": "Delaunay Triangulation in Three Dimensions",
"doi": null,
"abstractUrl": "/magazine/cg/1995/05/mcg1995050062/13rRUwInvMz",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050736",
"title": "Computing 2D Constrained Delaunay Triangulation Using the GPU",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050736/13rRUxASuSM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvris/2020/9636/0/963600b063",
"title": "Optimal solution analysis of octagonal Steiner tree problem based on GPU acceleration",
"doi": null,
"abstractUrl": "/proceedings-article/icvris/2020/963600b063/1x4Z5rZqVUI",
"parentPublication": {
"id": "proceedings/icvris/2020/9636/0",
"title": "2020 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAsTgXc",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs4S8L5",
"doi": "10.1109/ICCVW.2011.6130334",
"title": "Illumination estimation from shadow borders",
"normalizedTitle": "Illumination estimation from shadow borders",
"abstract": "In this paper we discuss illumination estimation from a single image in general scenes and associate it with the existence of shadow edges, avoiding several pitfalls that burden previous illumination estimation approaches, which rely on associating a parametrization of illumination with the per pixel intensity of shadows or shading. We show a way to couple shadow and illumination estimation, relying only on the subset of shadow edges that is relevant to the provided geometry. In our approach, illumination estimation is posed as the minimization of an energy function that penalizes the matching between the expected shadow outline and observed image edges. Minimizing this energy function is strongly tied to selecting the appropriate set of potential shadow edges in the image. Our approach leads to an illumination estimation algorithm that performs on par with or better than the state of the art, even when scene geometry knowledge is limited, while having much lower computational complexity than state-of-the-art methods. We demonstrate the effectiveness of this approach both with quantitative results on synthetic data and qualitative evaluation on real images.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we discuss illumination estimation from a single image in general scenes and associate it with the existence of shadow edges, avoiding several pitfalls that burden previous illumination estimation approaches, which rely on associating a parametrization of illumination with the per pixel intensity of shadows or shading. We show a way to couple shadow and illumination estimation, relying only on the subset of shadow edges that is relevant to the provided geometry. In our approach, illumination estimation is posed as the minimization of an energy function that penalizes the matching between the expected shadow outline and observed image edges. Minimizing this energy function is strongly tied to selecting the appropriate set of potential shadow edges in the image. Our approach leads to an illumination estimation algorithm that performs on par with or better than the state of the art, even when scene geometry knowledge is limited, while having much lower computational complexity than state-of-the-art methods. We demonstrate the effectiveness of this approach both with quantitative results on synthetic data and qualitative evaluation on real images.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we discuss illumination estimation from a single image in general scenes and associate it with the existence of shadow edges, avoiding several pitfalls that burden previous illumination estimation approaches, which rely on associating a parametrization of illumination with the per pixel intensity of shadows or shading. We show a way to couple shadow and illumination estimation, relying only on the subset of shadow edges that is relevant to the provided geometry. In our approach, illumination estimation is posed as the minimization of an energy function that penalizes the matching between the expected shadow outline and observed image edges. Minimizing this energy function is strongly tied to selecting the appropriate set of potential shadow edges in the image. Our approach leads to an illumination estimation algorithm that performs on par with or better than the state of the art, even when scene geometry knowledge is limited, while having much lower computational complexity than state-of-the-art methods. We demonstrate the effectiveness of this approach both with quantitative results on synthetic data and qualitative evaluation on real images.",
"fno": "06130334",
"keywords": [
"Computational Complexity",
"Computational Geometry",
"Image Matching",
"Lighting",
"Illumination Estimation",
"Shadow Borders",
"Shadow Edges",
"Shadow Per Pixel Intensity",
"Energy Function Minimization",
"Shadow Outline Matching",
"Observed Image Edge Matching",
"Geometry Knowledge",
"Computational Complexity",
"Lighting",
"Image Edge Detection",
"Geometry",
"Light Sources",
"Estimation",
"Three Dimensional Displays",
"Solid Modeling"
],
"authors": [
{
"affiliation": "Stony Brook University, NY, USA",
"fullName": "Alexandros Panagopoulos",
"givenName": "Alexandros",
"surname": "Panagopoulos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Stony Brook University, NY, USA",
"fullName": "Tomás F. Yago Vicente",
"givenName": "Tomás F. Yago",
"surname": "Vicente",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Stony Brook University, NY, USA",
"fullName": "Dimitris Samaras",
"givenName": "Dimitris",
"surname": "Samaras",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-11-01T00:00:00",
"pubType": "proceedings",
"pages": "798-805",
"year": "2011",
"issn": null,
"isbn": "978-1-4673-0063-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06130333",
"articleId": "12OmNxaNGkI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06130335",
"articleId": "12OmNyQ7FOF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2009/3992/0/05206764",
"title": "Illumination and spatially varying specular reflectance from a single view",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206764/12OmNARRYl8",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815035",
"title": "Face Image Illumination Transfer through Eye-Relit 3D Basis",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815035/12OmNAgoV7Y",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1999/0164/2/00790314",
"title": "Illumination distribution from brightness in shadows: Adaptive estimation of illumination distribution with unknown reflectance properties in shadow regions",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1999/00790314/12OmNvAiSCT",
"parentPublication": {
"id": "proceedings/iccv/1999/0164/2",
"title": "Proceedings of the Seventh IEEE International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206665",
"title": "Robust shadow and illumination estimation using a mixture model",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206665/12OmNx3q6Va",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460098",
"title": "Illumination estimation from shadow and incomplete object shape captured by an RGB-D camera",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460098/12OmNzXWZK0",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459333",
"title": "Shadow cameras: Reciprocal views from illumination masks",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459333/12OmNzlUKuY",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/02/ttp2013020437",
"title": "Simultaneous Cast Shadows, Illumination and Geometry Inference Using Hypergraphs",
"doi": null,
"abstractUrl": "/journal/tp/2013/02/ttp2013020437/13rRUyfKIEo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a189",
"title": "Deep Consistent Illumination in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a189/1gyslmCJMjK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i077",
"title": "Lighthouse: Predicting Lighting Volumes for Spatially-Coherent Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i077/1m3omNjwpW0",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a179",
"title": "DSNet: Deep Shadow Network for Illumination Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a179/1tuB590ZFDO",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwLOYSu",
"title": "2017 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvDqsQf",
"doi": "10.1109/CW.2017.54",
"title": "Voxel-Based Interactive Rendering of Translucent Materials under Area Lights Using Sparse Samples",
"normalizedTitle": "Voxel-Based Interactive Rendering of Translucent Materials under Area Lights Using Sparse Samples",
"abstract": "Interactive rendering of translucent materials in virtual worlds has always proved to be challenging. In our work, we develop a voxel illumination framework for translucent materials illuminated by area lights. Our voxel illumination framework consists of two voxel structures. They are the Enhanced Subsurface Light Propagation Volumes (ESLPV), which handles the local translucent material appearance and the Light Propagation Volumes (LPV), which handles indirect illumination for the entire scene. Using a set of sparsely distributed Poisson disk samples in the ESLPV and LPV, illumination can be gathered from area lights. A uniform set of Poisson disk samples on the translucent objects is resampled and chosen as Translucent Planar Lights (TPLs) and is used to distribute lighting from translucent objects into the LPV by an additional gathering process. Our technique allows for direct and indirect illuminations from highly scattering translucent materials to be rendered interactively under area lighting at good quality. We can achieve similar effects, such as scattered light illumination from translucent materials, when compared to offline renderers without precomputations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Interactive rendering of translucent materials in virtual worlds has always proved to be challenging. In our work, we develop a voxel illumination framework for translucent materials illuminated by area lights. Our voxel illumination framework consists of two voxel structures. They are the Enhanced Subsurface Light Propagation Volumes (ESLPV), which handles the local translucent material appearance and the Light Propagation Volumes (LPV), which handles indirect illumination for the entire scene. Using a set of sparsely distributed Poisson disk samples in the ESLPV and LPV, illumination can be gathered from area lights. A uniform set of Poisson disk samples on the translucent objects is resampled and chosen as Translucent Planar Lights (TPLs) and is used to distribute lighting from translucent objects into the LPV by an additional gathering process. Our technique allows for direct and indirect illuminations from highly scattering translucent materials to be rendered interactively under area lighting at good quality. We can achieve similar effects, such as scattered light illumination from translucent materials, when compared to offline renderers without precomputations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Interactive rendering of translucent materials in virtual worlds has always proved to be challenging. In our work, we develop a voxel illumination framework for translucent materials illuminated by area lights. Our voxel illumination framework consists of two voxel structures. They are the Enhanced Subsurface Light Propagation Volumes (ESLPV), which handles the local translucent material appearance and the Light Propagation Volumes (LPV), which handles indirect illumination for the entire scene. Using a set of sparsely distributed Poisson disk samples in the ESLPV and LPV, illumination can be gathered from area lights. A uniform set of Poisson disk samples on the translucent objects is resampled and chosen as Translucent Planar Lights (TPLs) and is used to distribute lighting from translucent objects into the LPV by an additional gathering process. Our technique allows for direct and indirect illuminations from highly scattering translucent materials to be rendered interactively under area lighting at good quality. We can achieve similar effects, such as scattered light illumination from translucent materials, when compared to offline renderers without precomputations.",
"fno": "2089a056",
"keywords": [
"Approximation Theory",
"Brightness",
"Interactive Systems",
"Light Propagation",
"Lighting",
"Ray Tracing",
"Rendering Computer Graphics",
"Translucent Materials",
"Area Lighting",
"Scattered Light Illumination",
"Interactive Rendering",
"Voxel Illumination Framework",
"Enhanced Subsurface Light Propagation Volumes",
"Local Translucent Material Appearance",
"Poisson Disk Samples",
"Translucent Objects",
"Translucent Planar Lights",
"Lighting",
"Rendering Computer Graphics",
"Scattering",
"Light Sources",
"Pipelines",
"Heuristic Algorithms",
"Translucent Materials",
"Area Lights",
"Direct Illumination",
"Indirect Illumination",
"Interactive Rendering",
"Virtual Worlds"
],
"authors": [
{
"affiliation": "School of Computer Science and Engineering, Nanyang Technological University, Singapore",
"fullName": "Ming Di Koa",
"givenName": "Ming",
"surname": "Di Koa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fraunhofer Singapore, Singapore, Singapore",
"fullName": "Henry Johan",
"givenName": "Henry",
"surname": "Johan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Science and Engineering, Nanyang Technological University, Singapore",
"fullName": "Alexei Sourin",
"givenName": "Alexei",
"surname": "Sourin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-09-01T00:00:00",
"pubType": "proceedings",
"pages": "56-63",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-2089-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2089a048",
"articleId": "12OmNyFU77Q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2089a064",
"articleId": "12OmNybfr7z",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948407",
"title": "Delta Voxel Cone Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948407/12OmNxG1yH8",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tiiec/2013/5146/0/5146a103",
"title": "Smart Street Lights",
"doi": null,
"abstractUrl": "/proceedings-article/tiiec/2013/5146a103/12OmNzZEAAY",
"parentPublication": {
"id": "proceedings/tiiec/2013/5146/0",
"title": "2013 Texas Instruments India Educators' Conference (TIIEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761892",
"title": "Analysis of subsurface scattering under generic illumination",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761892/12OmNzd7bV9",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2013/6463/0/06528300",
"title": "Descattering of transmissive observation using Parallel High-Frequency Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2013/06528300/12OmNzmclka",
"parentPublication": {
"id": "proceedings/iccp/2013/6463/0",
"title": "2013 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/03/mcg2013030066",
"title": "Double- and Multiple-Scattering Effects in Translucent Materials",
"doi": null,
"abstractUrl": "/magazine/cg/2013/03/mcg2013030066/13rRUIJcWfX",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/04/07755748",
"title": "Recovering Inner Slices of Layered Translucent Objects by Multi-Frequency Illumination",
"doi": null,
"abstractUrl": "/journal/tp/2017/04/07755748/13rRUxBa5t9",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/05/06671604",
"title": "Importance-Driven Accessory Lights Designfor Enhancing Local Shapes",
"doi": null,
"abstractUrl": "/journal/tg/2014/05/06671604/13rRUxBa5xj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/10/08093692",
"title": "Point-Based Rendering for Homogeneous Participating Media with Refractive Boundaries",
"doi": null,
"abstractUrl": "/journal/tg/2018/10/08093692/13rRUy0qnGq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08600345",
"title": "Precomputed Multiple Scattering for Rapid Light Simulation in Participating Media",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08600345/17D45Xh13tH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/10/09113332",
"title": "Stochastic Lightcuts for Sampling Many Lights",
"doi": null,
"abstractUrl": "/journal/tg/2021/10/09113332/1kxX2rlqpDa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCbCrVT",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvIxeXq",
"doi": "10.1109/CVPR.2014.501",
"title": "Exploiting Shading Cues in Kinect IR Images for Geometry Refinement",
"normalizedTitle": "Exploiting Shading Cues in Kinect IR Images for Geometry Refinement",
"abstract": "In this paper, we propose a method to refine geometry of 3D meshes from the Kinect fusion by exploiting shading cues captured from the infrared (IR) camera of Kinect. A major benefit of using the Kinect IR camera instead of a RGB camera is that the IR images captured by Kinect are narrow band images which filtered out most undesired ambient light that makes our system robust to natural indoor illumination. We define a near light IR shading model which describes the captured intensity as a function of surface normals, albedo, lighting direction, and distance between a light source and surface points. To resolve ambiguity in our model between normals and distance, we utilize an initial 3D mesh from the Kinect fusion and multi-view information to reliably estimate surface details that were not reconstructed by the Kinect fusion. Our approach directly operates on a 3D mesh model for geometry refinement. The effectiveness of our approach is demonstrated through several challenging real-world examples.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a method to refine geometry of 3D meshes from the Kinect fusion by exploiting shading cues captured from the infrared (IR) camera of Kinect. A major benefit of using the Kinect IR camera instead of a RGB camera is that the IR images captured by Kinect are narrow band images which filtered out most undesired ambient light that makes our system robust to natural indoor illumination. We define a near light IR shading model which describes the captured intensity as a function of surface normals, albedo, lighting direction, and distance between a light source and surface points. To resolve ambiguity in our model between normals and distance, we utilize an initial 3D mesh from the Kinect fusion and multi-view information to reliably estimate surface details that were not reconstructed by the Kinect fusion. Our approach directly operates on a 3D mesh model for geometry refinement. The effectiveness of our approach is demonstrated through several challenging real-world examples.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a method to refine geometry of 3D meshes from the Kinect fusion by exploiting shading cues captured from the infrared (IR) camera of Kinect. A major benefit of using the Kinect IR camera instead of a RGB camera is that the IR images captured by Kinect are narrow band images which filtered out most undesired ambient light that makes our system robust to natural indoor illumination. We define a near light IR shading model which describes the captured intensity as a function of surface normals, albedo, lighting direction, and distance between a light source and surface points. To resolve ambiguity in our model between normals and distance, we utilize an initial 3D mesh from the Kinect fusion and multi-view information to reliably estimate surface details that were not reconstructed by the Kinect fusion. Our approach directly operates on a 3D mesh model for geometry refinement. The effectiveness of our approach is demonstrated through several challenging real-world examples.",
"fno": "5118d922",
"keywords": [
"Cameras",
"Lighting",
"Geometry",
"Three Dimensional Displays",
"Light Sources",
"Mathematical Model",
"Estimation",
"Refinement",
"Kinect",
"IR",
"Infrared"
],
"authors": [
{
"affiliation": null,
"fullName": "Gyeongmin Choe",
"givenName": "Gyeongmin",
"surname": "Choe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jaesik Park",
"givenName": "Jaesik",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yu-Wing Tai",
"givenName": "Yu-Wing",
"surname": "Tai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "In So Kweon",
"givenName": "In So",
"surname": "Kweon",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-06-01T00:00:00",
"pubType": "proceedings",
"pages": "3922-3929",
"year": "2014",
"issn": "1063-6919",
"isbn": "978-1-4799-5118-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5118d914",
"articleId": "12OmNxR5US6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5118d930",
"articleId": "12OmNxYtu3w",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391d370",
"title": "Polarized 3D: High-Quality Depth Sensing with Polarization Cues",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d370/12OmNApcuuU",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a143",
"title": "A Variational Study on BRDF Reconstruction in a Structured Light Scanner",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a143/12OmNBfqG3s",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2018/2526/0/08368465",
"title": "Near-light photometric stereo using circularly placed point light sources",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2018/08368465/12OmNqBbHSi",
"parentPublication": {
"id": "proceedings/iccp/2018/2526/0",
"title": "2018 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2016/0811/0/0811a084",
"title": "Reflectance Transformation Imaging Method for Large-Scale Objects",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2016/0811a084/12OmNsd6vp3",
"parentPublication": {
"id": "proceedings/cgiv/2016/0811/0",
"title": "2016 13th International Conference on Computer Graphics, Imaging and Visualization (CGiV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206614",
"title": "A projector-camera setup for geometry-invariant frequency demultiplexing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206614/12OmNvoWV1H",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130388",
"title": "I spy with my little eye: Learning optimal filters for cross-modal stereo under projected patterns",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130388/12OmNwCJOFm",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209c751",
"title": "Hybrid Kinect Depth Map Refinement for Transparent Objects",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c751/12OmNxveNNV",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130328",
"title": "Photometric stereo with auto-radiometric calibration",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130328/12OmNyPQ4A9",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2015/9942/0/9942a332",
"title": "Effectiveness Comparison of Kinect and Kinect 2 for Recognition of Oyama Karate Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2015/9942a332/12OmNzmclUU",
"parentPublication": {
"id": "proceedings/nbis/2015/9942/0",
"title": "2015 18th International Conference on Network-Based Information Systems (NBiS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/09/09403910",
"title": "Surface Normals and Light Directions From Shading and Polarization",
"doi": null,
"abstractUrl": "/journal/tp/2022/09/09403910/1sLH9jSRKKc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy4r3R2",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvoWV1H",
"doi": "10.1109/CVPR.2009.5206614",
"title": "A projector-camera setup for geometry-invariant frequency demultiplexing",
"normalizedTitle": "A projector-camera setup for geometry-invariant frequency demultiplexing",
"abstract": "Consider a projector-camera setup where a sinusoidal pattern is projected onto the scene, and an image of the objects imprinted with the pattern is captured by the camera. In this configuration, the local frequency of the sinusoidal pattern as seen by the camera is a function of both the frequency of the projected sinusoid and the local geometry of objects in the scene. We observe that, by strategically placing the projector and the camera in canonical configuration and projecting sinusoidal patterns aligned with the epipolar lines, the frequency of the sinusoids seen in the image becomes invariant to the local object geometry. This property allows us to design systems composed of a camera and multiple projectors, which can be used to capture a single image of a scene illuminated by all projectors at the same time, and then demultiplex the frequencies generated by each individual projector separately. We show how imaging systems like those can be used to segment, from a single image, the shadows cast by each individual projector - an application that we call coded shadow photography. The method is useful to extend the applicability of techniques that rely on the analysis of shadows cast by multiple light sources placed at different positions, as the individual shadows captured at distinct instants of time now can be obtained from a single shot, enabling the processing of dynamic scenes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Consider a projector-camera setup where a sinusoidal pattern is projected onto the scene, and an image of the objects imprinted with the pattern is captured by the camera. In this configuration, the local frequency of the sinusoidal pattern as seen by the camera is a function of both the frequency of the projected sinusoid and the local geometry of objects in the scene. We observe that, by strategically placing the projector and the camera in canonical configuration and projecting sinusoidal patterns aligned with the epipolar lines, the frequency of the sinusoids seen in the image becomes invariant to the local object geometry. This property allows us to design systems composed of a camera and multiple projectors, which can be used to capture a single image of a scene illuminated by all projectors at the same time, and then demultiplex the frequencies generated by each individual projector separately. We show how imaging systems like those can be used to segment, from a single image, the shadows cast by each individual projector - an application that we call coded shadow photography. The method is useful to extend the applicability of techniques that rely on the analysis of shadows cast by multiple light sources placed at different positions, as the individual shadows captured at distinct instants of time now can be obtained from a single shot, enabling the processing of dynamic scenes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Consider a projector-camera setup where a sinusoidal pattern is projected onto the scene, and an image of the objects imprinted with the pattern is captured by the camera. In this configuration, the local frequency of the sinusoidal pattern as seen by the camera is a function of both the frequency of the projected sinusoid and the local geometry of objects in the scene. We observe that, by strategically placing the projector and the camera in canonical configuration and projecting sinusoidal patterns aligned with the epipolar lines, the frequency of the sinusoids seen in the image becomes invariant to the local object geometry. This property allows us to design systems composed of a camera and multiple projectors, which can be used to capture a single image of a scene illuminated by all projectors at the same time, and then demultiplex the frequencies generated by each individual projector separately. We show how imaging systems like those can be used to segment, from a single image, the shadows cast by each individual projector - an application that we call coded shadow photography. The method is useful to extend the applicability of techniques that rely on the analysis of shadows cast by multiple light sources placed at different positions, as the individual shadows captured at distinct instants of time now can be obtained from a single shot, enabling the processing of dynamic scenes.",
"fno": "05206614",
"keywords": [
"Computer Vision",
"Image Segmentation",
"Photography",
"Projector Camera Setup",
"Geometry Invariant Frequency Demultiplexing",
"Sinusoidal Pattern",
"Coded Shadow Photography",
"Frequency",
"Demultiplexing",
"Layout",
"Cameras",
"Geometry",
"Lighting",
"Image Segmentation",
"Photography",
"Light Sources",
"Computer Vision"
],
"authors": [
{
"affiliation": "UC Santa Barbara, USA",
"fullName": "Daniel A. Vaquero",
"givenName": "Daniel A.",
"surname": "Vaquero",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MIT Media Lab, USA",
"fullName": "Ramesh Raskary",
"givenName": "Ramesh",
"surname": "Raskary",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IBM Research, USA",
"fullName": "Rogerio S. Feris",
"givenName": "Rogerio S.",
"surname": "Feris",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UC Santa Barbara, USA",
"fullName": "Matthew Turk",
"givenName": "Matthew",
"surname": "Turk",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2082-2089",
"year": "2009",
"issn": "1063-6919",
"isbn": "978-1-4244-3992-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05206613",
"articleId": "12OmNx76TJo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05206615",
"articleId": "12OmNvvLi50",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460424",
"title": "Coded aperture for projector and camera for robust 3D measurement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460424/12OmNBpVQ2Y",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a815",
"title": "Efficient Separation Between Projected Patterns for Multiple Projector 3D People Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a815/12OmNwcCINM",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a464",
"title": "Simple, Accurate, and Robust Projector-Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2009/3994/0/05204341",
"title": "Shadow multiplexing for real-time silhouette extraction",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2009/05204341/12OmNyoiZ7z",
"parentPublication": {
"id": "proceedings/cvprw/2009/3994/0",
"title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/1/7000a039",
"title": "Towards Illumination-Invariant 3D Reconstruction Using ToF RGB-D Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a039/12OmNzT7OpK",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270467",
"title": "Real-Time Projector Tracking on Complex Geometry Using Ordinary Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270467/12OmNzmcll1",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2007/02/i0362",
"title": "Polarization Multiplexing and Demultiplexing for Appearance-Based Modeling",
"doi": null,
"abstractUrl": "/journal/tp/2007/02/i0362/13rRUxNW20k",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b923",
"title": "Multispectral Direct-Global Separation of Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b923/18j8GnDjoWs",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798245",
"title": "Shadowless Projector: Suppressing Shadows in Projection Mapping with Micro Mirror Array Plate",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798245/1cI6ar8DdyE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797714",
"title": "PILC Projector: RGB-IR Projector for Pixel-level Infrared Light Communication",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797714/1cJ0L8WggAE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzIUg0M",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"acronym": "icig",
"groupId": "1001790",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwoPtun",
"doi": "10.1109/ICIG.2013.185",
"title": "Cartoon Rendering Illumination Model Based on Phong",
"normalizedTitle": "Cartoon Rendering Illumination Model Based on Phong",
"abstract": "3D cartoon rendering has broad application prospect in games, movies and cartoons. To this end, this paper introduces a cartoon rendering illumination model improved from Phong illumination model, and realizes it with 3ds Max SDK in the form of plug-ins. By discretizing diffuse reflection part and specular reflection part in the Phong illumination model, surfaces of models show different blocks of color, among which there are clear boundaries. In addition, by using linear interpolation between different color blocks, boundaries can be soften. The experimental results show that this cartoon rendering illumination model can not only make a cartoon appearance, but also mix with Phong illumination model, and achieve a unique effect.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D cartoon rendering has broad application prospect in games, movies and cartoons. To this end, this paper introduces a cartoon rendering illumination model improved from Phong illumination model, and realizes it with 3ds Max SDK in the form of plug-ins. By discretizing diffuse reflection part and specular reflection part in the Phong illumination model, surfaces of models show different blocks of color, among which there are clear boundaries. In addition, by using linear interpolation between different color blocks, boundaries can be soften. The experimental results show that this cartoon rendering illumination model can not only make a cartoon appearance, but also mix with Phong illumination model, and achieve a unique effect.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D cartoon rendering has broad application prospect in games, movies and cartoons. To this end, this paper introduces a cartoon rendering illumination model improved from Phong illumination model, and realizes it with 3ds Max SDK in the form of plug-ins. By discretizing diffuse reflection part and specular reflection part in the Phong illumination model, surfaces of models show different blocks of color, among which there are clear boundaries. In addition, by using linear interpolation between different color blocks, boundaries can be soften. The experimental results show that this cartoon rendering illumination model can not only make a cartoon appearance, but also mix with Phong illumination model, and achieve a unique effect.",
"fno": "5050a913",
"keywords": [
"Lighting",
"Reflection",
"Rendering Computer Graphics",
"Color",
"Light Sources",
"Mathematical Model",
"Vectors",
"Phong",
"NPR",
"Toon Shading"
],
"authors": [
{
"affiliation": null,
"fullName": "Shaohao Wang",
"givenName": "Shaohao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yurui Wei",
"givenName": "Yurui",
"surname": "Wei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chengying Gao",
"givenName": "Chengying",
"surname": "Gao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icig",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-07-01T00:00:00",
"pubType": "proceedings",
"pages": "913-919",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5050-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5050a909",
"articleId": "12OmNwJPMWu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5050a920",
"articleId": "12OmNzcPA1H",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2009/3992/0/05206764",
"title": "Illumination and spatially varying specular reflectance from a single view",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206764/12OmNARRYl8",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2008/2153/0/04813364",
"title": "Model-based reconstruction for illumination variation in face images",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2008/04813364/12OmNAYXWLp",
"parentPublication": {
"id": "proceedings/fg/2008/2153/0",
"title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156382",
"title": "Efficient volume illumination with multiple light sources through selective light updates",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156382/12OmNvDZF6A",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a056",
"title": "Voxel-Based Interactive Rendering of Translucent Materials under Area Lights Using Sparse Samples",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a056/12OmNvDqsQf",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011856",
"title": "Real-time rendering with complex natural illumination",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011856/12OmNweTvQm",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pbmcv/1995/7021/0/00514684",
"title": "Reflectance analysis under solar illumination",
"doi": null,
"abstractUrl": "/proceedings-article/pbmcv/1995/00514684/12OmNxbW4O4",
"parentPublication": {
"id": "proceedings/pbmcv/1995/7021/0",
"title": "Proceedings of the Workshop on Physics-Based Modeling in Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761433",
"title": "Illumination Transition Image: Parameter-based Illumination Estimation and Re-rendering",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761433/12OmNzVXNU8",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2022/05/09712406",
"title": "Predicting Surface Reflectance Properties of Outdoor Scenes Under Unknown Natural Illumination",
"doi": null,
"abstractUrl": "/magazine/cg/2022/05/09712406/1AZLEpMIeME",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09887904",
"title": "Fast and Accurate Illumination Estimation Using LDR Panoramic Images for Realistic Rendering",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09887904/1GBRnHyZ1bW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09904431",
"title": "Neural Global Illumination: Interactive Indirect Illumination Prediction under Dynamic Area Lights",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09904431/1H0GdxnVnws",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCaLEmR",
"title": "2012 Fourth International Conference on Computational and Information Sciences",
"acronym": "iccis",
"groupId": "1800262",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAY79d9",
"doi": "10.1109/ICCIS.2012.115",
"title": "Data-Driven Based Interactive Motion Blending",
"normalizedTitle": "Data-Driven Based Interactive Motion Blending",
"abstract": "Motion blending is an important techonology of character animation, which produces new motion by combining two or more motion clips according to blending parameters. But previous methods can hardly generate the convincing motion in real time , and require too much manual intervention and preprocessing. In this paper, we introduce a new time synchronization algorithm, which represents data based on bone levels instead of joint levels, applying curve simplified algorithm to time synchronization of motion blending. Extracted time synchronization points are used for grading time synchronization of motion blending. After that, we do the motion blending by aligning coordinate, interpolation and recreating root joint. Finally we develop different kinds of motion blending generators, and design and perform visual editor for motion blending. It is convenient for users to adjust blending parameters on line and observe blending effects. Our method is efficient enough and simple for achieving good blending effects without complicated preprocessing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Motion blending is an important techonology of character animation, which produces new motion by combining two or more motion clips according to blending parameters. But previous methods can hardly generate the convincing motion in real time , and require too much manual intervention and preprocessing. In this paper, we introduce a new time synchronization algorithm, which represents data based on bone levels instead of joint levels, applying curve simplified algorithm to time synchronization of motion blending. Extracted time synchronization points are used for grading time synchronization of motion blending. After that, we do the motion blending by aligning coordinate, interpolation and recreating root joint. Finally we develop different kinds of motion blending generators, and design and perform visual editor for motion blending. It is convenient for users to adjust blending parameters on line and observe blending effects. Our method is efficient enough and simple for achieving good blending effects without complicated preprocessing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Motion blending is an important techonology of character animation, which produces new motion by combining two or more motion clips according to blending parameters. But previous methods can hardly generate the convincing motion in real time , and require too much manual intervention and preprocessing. In this paper, we introduce a new time synchronization algorithm, which represents data based on bone levels instead of joint levels, applying curve simplified algorithm to time synchronization of motion blending. Extracted time synchronization points are used for grading time synchronization of motion blending. After that, we do the motion blending by aligning coordinate, interpolation and recreating root joint. Finally we develop different kinds of motion blending generators, and design and perform visual editor for motion blending. It is convenient for users to adjust blending parameters on line and observe blending effects. Our method is efficient enough and simple for achieving good blending effects without complicated preprocessing.",
"fno": "4789a530",
"keywords": [
"Animation",
"Synchronization",
"Joints",
"Bones",
"Generators",
"Vectors",
"Trajectory",
"Real Time Interactive",
"Character Animation",
"Motion Blending",
"Time Synchronization"
],
"authors": [
{
"affiliation": null,
"fullName": "Ruijiao Tian",
"givenName": "Ruijiao",
"surname": "Tian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yue Cao",
"givenName": "Yue",
"surname": "Cao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xin Li",
"givenName": "Xin",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chen Tang",
"givenName": "Chen",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qingxin Zhu",
"givenName": "Qingxin",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-08-01T00:00:00",
"pubType": "proceedings",
"pages": "530-533",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-2406-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4789a526",
"articleId": "12OmNxGja6w",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4789a534",
"articleId": "12OmNBJw9Qm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cad-graphics/2013/2576/0/06814974",
"title": "A Data-Driven Approach to Efficient Character Articulation",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06814974/12OmNAle6tC",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2011/4501/0/4501a955",
"title": "GPU-based Motion Blending for Motion Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2011/4501a955/12OmNBcShTc",
"parentPublication": {
"id": "proceedings/iccis/2011/4501/0",
"title": "2011 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2011/4589/0/4589a577",
"title": "Real-Time Stereo Mosaicing Using Feature Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2011/4589a577/12OmNvSKNZ3",
"parentPublication": {
"id": "proceedings/ism/2011/4589/0",
"title": "2011 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759447",
"title": "Continual surface-based multi-projector blending for moving objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759447/12OmNvStcQS",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isbim/2008/3560/2/3560b038",
"title": "Mine Ore Blending Planning and Management Based on the Fuzzy Multi-objective Optimization Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/isbim/2008/3560b038/12OmNyugyG8",
"parentPublication": {
"id": "proceedings/isbim/2008/3560/2",
"title": "Business and Information Management, International Seminar on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771381",
"title": "The human motion database: A cognitive and parametric sampling of human motion",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771381/12OmNyyO8NC",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2006/2754/0/27540626",
"title": "A Motion Blending Approach Based on Unsupervised Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2006/27540626/12OmNzzfTlW",
"parentPublication": {
"id": "proceedings/icat/2006/2754/0",
"title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122122",
"title": "A Data-Driven Approach to Hue-Preserving Color-Blending",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122122/13rRUwd9CLL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050762",
"title": "Interactive Animation of 4D Performance Capture",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050762/13rRUxOve9H",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1995/02/mcg1995020044",
"title": "Shape Blending Using the Star-Skeleton Representation",
"doi": null,
"abstractUrl": "/magazine/cg/1995/02/mcg1995020044/13rRUygT7Ag",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC3Xhik",
"title": "2011 IEEE/ACM 15th International Symposium on Distributed Simulation and Real Time Applications",
"acronym": "ds-rt",
"groupId": "1000218",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvA1heq",
"doi": "10.1109/DS-RT.2011.27",
"title": "4D Performance Modelling and Animation",
"normalizedTitle": "4D Performance Modelling and Animation",
"abstract": "Visual reconstruction of dynamic events as 3D video, such as an actor performance or sports action, has advanced to the stage where it is possible to achieve free-viewpoint replay with a quality approaching the captured video. In this talk we present research going beyond replay to allow the creation of 4D models which support interactive animation control from captured performance whilst maintaining the realism of video. 4D models are constructed by alignment of reconstructed mesh sequences into a temporally coherent structure. Recent work has introduced a non-sequential approach to non-rigid mesh sequence alignment which constructs a shape similarity tree to align across a database of multiple sequences. This avoids problems of drift and tracking failure associated with sequential alignment approaches. Temporally aligned 4D models provide the basis for parameterisation of multiple related sequences to give continuous interactive movement control. Representation of multiple sequences in a 4D parametric motion graph enables transition between multiple motions to achieve interactive character animation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual reconstruction of dynamic events as 3D video, such as an actor performance or sports action, has advanced to the stage where it is possible to achieve free-viewpoint replay with a quality approaching the captured video. In this talk we present research going beyond replay to allow the creation of 4D models which support interactive animation control from captured performance whilst maintaining the realism of video. 4D models are constructed by alignment of reconstructed mesh sequences into a temporally coherent structure. Recent work has introduced a non-sequential approach to non-rigid mesh sequence alignment which constructs a shape similarity tree to align across a database of multiple sequences. This avoids problems of drift and tracking failure associated with sequential alignment approaches. Temporally aligned 4D models provide the basis for parameterisation of multiple related sequences to give continuous interactive movement control. Representation of multiple sequences in a 4D parametric motion graph enables transition between multiple motions to achieve interactive character animation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual reconstruction of dynamic events as 3D video, such as an actor performance or sports action, has advanced to the stage where it is possible to achieve free-viewpoint replay with a quality approaching the captured video. In this talk we present research going beyond replay to allow the creation of 4D models which support interactive animation control from captured performance whilst maintaining the realism of video. 4D models are constructed by alignment of reconstructed mesh sequences into a temporally coherent structure. Recent work has introduced a non-sequential approach to non-rigid mesh sequence alignment which constructs a shape similarity tree to align across a database of multiple sequences. This avoids problems of drift and tracking failure associated with sequential alignment approaches. Temporally aligned 4D models provide the basis for parameterisation of multiple related sequences to give continuous interactive movement control. Representation of multiple sequences in a 4D parametric motion graph enables transition between multiple motions to achieve interactive character animation.",
"fno": "06051804",
"keywords": [
"Computer Animation",
"Image Motion Analysis",
"Interactive Video",
"Mesh Generation",
"Solid Modelling",
"4 D Performance Modelling",
"Dynamic Event Visual Reconstruction",
"3 D Video",
"Free Viewpoint Replay",
"4 D Models",
"Interactive Animation Control",
"Reconstructed Mesh Sequences",
"Nonrigid Mesh Sequence Alignment",
"Shape Similarity Tree",
"Interactive Movement Control",
"4 D Parametric Motion Graph",
"Interactive Character Animation",
"Animation",
"Streaming Media",
"Solid Modeling",
"Tracking",
"Shape",
"Three Dimensional Displays",
"Databases"
],
"authors": [
{
"affiliation": null,
"fullName": "Adrian Hilton",
"givenName": "Adrian",
"surname": "Hilton",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ds-rt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-09-01T00:00:00",
"pubType": "proceedings",
"pages": "57-57",
"year": "2011",
"issn": "1550-6525",
"isbn": "978-1-4577-1643-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06051803",
"articleId": "12OmNwdtw7n",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06051805",
"articleId": "12OmNvAiSHm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/medivis/2006/2603/0/26030082",
"title": "Hierarchical Tracking of Intra-Cell Structures in 4D Images",
"doi": null,
"abstractUrl": "/proceedings-article/medivis/2006/26030082/12OmNqBtiZK",
"parentPublication": {
"id": "proceedings/medivis/2006/2603/0",
"title": "International Conference on Medical Information Visualisation - BioMedical Visualisation (MedVis'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2015/6683/0/6683a199",
"title": "Automatic 4D Facial Expression Recognition Using DCT Features",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2015/6683a199/12OmNqNG3gL",
"parentPublication": {
"id": "proceedings/wacv/2015/6683/0",
"title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/04/07124506",
"title": "Direct Visuo-Haptic 4D Volume Rendering Using Respiratory Motion Models",
"doi": null,
"abstractUrl": "/journal/th/2015/04/07124506/13rRUwInvfi",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050762",
"title": "Interactive Animation of 4D Performance Capture",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050762/13rRUxOve9H",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08019847",
"title": "CasCADe: A Novel 4D Visualization System for Virtual Construction Planning",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08019847/13rRUxlgxTr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/03/mcg2018030131",
"title": "4D Cubism: Modeling, Animation, and Fabrication of Artistic Shapes",
"doi": null,
"abstractUrl": "/magazine/cg/2018/03/mcg2018030131/13rRUy3gmXD",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/02/09745790",
"title": "4D Atlas: Statistical Analysis of the Spatiotemporal Variability in Longitudinal 3D Shape Data",
"doi": null,
"abstractUrl": "/journal/tp/2023/02/09745790/1CbVkWyt0LC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2023/4544/0/10042606",
"title": "The Florence 4D Facial Expression Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2023/10042606/1KOuYMNncyY",
"parentPublication": {
"id": "proceedings/fg/2023/4544/0",
"title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2022/5351/0/535100a354",
"title": "An Image Encryption Scheme Based on 4D Chaotic System and Permutation-diffusion Operations",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2022/535100a354/1KYt45bWYOQ",
"parentPublication": {
"id": "proceedings/icnisc/2022/5351/0",
"title": "2022 8th Annual International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2020/1969/0/196900a665",
"title": "Dynamic Modeling of Interactive Scene in 3D Animation Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2020/196900a665/1wG5Vm0v6IU",
"parentPublication": {
"id": "proceedings/icris/2020/1969/0",
"title": "2020 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx2QUDD",
"title": "2015 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwFid1n",
"doi": "10.1109/3DV.2015.60",
"title": "Video Based Animation Synthesis with the Essential Graph",
"normalizedTitle": "Video Based Animation Synthesis with the Essential Graph",
"abstract": "We propose a method to generate animations using video-based mesh sequences of elementary movements of a shape. New motions that satisfy high-level user-specified constraints are built by recombining and interpolating the frames in the observed mesh sequences. The interest of video based meshes is to provide real full shape information and to enable therefore realistic shape animations. A resulting issue lies, however, in the difficulty to combine and interpolate human poses without a parametric pose model, as with skeleton based animations. To address this issue, our method brings two innovations that contribute at different levels: Locally between two motion sequences, we introduce a new approach to generate realistic transitions using dynamic time warping, More globally, over a set of motion sequences, we propose the essential graph as an efficient structure to encode the most realistic transitions between all pairs of input shape poses. Graph search in the essential graph allows then to generate realistic motions that are optimal with respect to various user-defined constraints. We present both quantitative and qualitative results on various 3D video datasets. They show that our approach compares favourably with previous strategies in this field that use the motion graph.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a method to generate animations using video-based mesh sequences of elementary movements of a shape. New motions that satisfy high-level user-specified constraints are built by recombining and interpolating the frames in the observed mesh sequences. The interest of video based meshes is to provide real full shape information and to enable therefore realistic shape animations. A resulting issue lies, however, in the difficulty to combine and interpolate human poses without a parametric pose model, as with skeleton based animations. To address this issue, our method brings two innovations that contribute at different levels: Locally between two motion sequences, we introduce a new approach to generate realistic transitions using dynamic time warping, More globally, over a set of motion sequences, we propose the essential graph as an efficient structure to encode the most realistic transitions between all pairs of input shape poses. Graph search in the essential graph allows then to generate realistic motions that are optimal with respect to various user-defined constraints. We present both quantitative and qualitative results on various 3D video datasets. They show that our approach compares favourably with previous strategies in this field that use the motion graph.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a method to generate animations using video-based mesh sequences of elementary movements of a shape. New motions that satisfy high-level user-specified constraints are built by recombining and interpolating the frames in the observed mesh sequences. The interest of video based meshes is to provide real full shape information and to enable therefore realistic shape animations. A resulting issue lies, however, in the difficulty to combine and interpolate human poses without a parametric pose model, as with skeleton based animations. To address this issue, our method brings two innovations that contribute at different levels: Locally between two motion sequences, we introduce a new approach to generate realistic transitions using dynamic time warping, More globally, over a set of motion sequences, we propose the essential graph as an efficient structure to encode the most realistic transitions between all pairs of input shape poses. Graph search in the essential graph allows then to generate realistic motions that are optimal with respect to various user-defined constraints. We present both quantitative and qualitative results on various 3D video datasets. They show that our approach compares favourably with previous strategies in this field that use the motion graph.",
"fno": "8332a478",
"keywords": [
"Animation",
"Motion Segmentation",
"Three Dimensional Displays",
"Shape",
"Interpolation",
"Joints"
],
"authors": [
{
"affiliation": null,
"fullName": "Adnane Boukhayma",
"givenName": "Adnane",
"surname": "Boukhayma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Edmond Boyer",
"givenName": "Edmond",
"surname": "Boyer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-10-01T00:00:00",
"pubType": "proceedings",
"pages": "478-486",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-8332-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8332a469",
"articleId": "12OmNzR8Cwl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8332a487",
"articleId": "12OmNz61dwY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icctd/2009/3892/2/3892b435",
"title": "Motion Graph for Character Animation: Design Considerations",
"doi": null,
"abstractUrl": "/proceedings-article/icctd/2009/3892b435/12OmNrGb2kv",
"parentPublication": {
"id": "proceedings/icctd/2009/3892/2",
"title": "Computer Technology and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b892",
"title": "Temporal Segmentation and Seamless Stitching of Motion Patterns for Synthesizing Novel Animations of Periodic Dances",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b892/12OmNvDZEQ3",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206626",
"title": "Human motion synthesis from 3D video",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206626/12OmNwE9ONo",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177478",
"title": "Affect-expressive hand gestures synthesis and animation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177478/12OmNxGja0m",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2007/2996/0/29960003",
"title": "A Simple Framework for Natural Animation of Digitized Models",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2007/29960003/12OmNzIUfYb",
"parentPublication": {
"id": "proceedings/sibgrapi/2007/2996/0",
"title": "XX Brazilian Symposium on Computer Graphics and Image Processing (SIBGRAPI 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08352750",
"title": "Surface Motion Capture Animation Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08352750/13rRUwjXZSl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/06/v1523",
"title": "Expressive Facial Animation Synthesis by Learning Speech Coarticulation and Expression Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2006/06/v1523/13rRUxASubv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050762",
"title": "Interactive Animation of 4D Performance Capture",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050762/13rRUxOve9H",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/04/mcg2017040030",
"title": "Data-Driven Approach to Synthesizing Facial Animation Using Motion Capture",
"doi": null,
"abstractUrl": "/magazine/cg/2017/04/mcg2017040030/13rRUyeTVkv",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09551755",
"title": "A Music-Driven Deep Generative Adversarial Model for Guzheng Playing Animation",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09551755/1xgx3sOEUXS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx5YvrH",
"title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)",
"acronym": "icat",
"groupId": "1001485",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzzfTlW",
"doi": "10.1109/ICAT.2006.14",
"title": "A Motion Blending Approach Based on Unsupervised Clustering",
"normalizedTitle": "A Motion Blending Approach Based on Unsupervised Clustering",
"abstract": "Motion blending, which allows the generation of new motions by interpolation or transition between motion capture sequences, is widely accepted as a standard technique in computer animation. But the transition time and duration is manually chosen in traditional blending approaches and all frames of motion clips must be computed to select the right transition point. This paper presents a new motion blending method that can reduce computing work by extracting the key frames for selecting the transition point and can automatically select the transition time. To evaluate the effectiveness of the improved method, we have done extensive experiments. The experiment results show that the novel motion blending method is effective in smoothly blending between two motion sequences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Motion blending, which allows the generation of new motions by interpolation or transition between motion capture sequences, is widely accepted as a standard technique in computer animation. But the transition time and duration is manually chosen in traditional blending approaches and all frames of motion clips must be computed to select the right transition point. This paper presents a new motion blending method that can reduce computing work by extracting the key frames for selecting the transition point and can automatically select the transition time. To evaluate the effectiveness of the improved method, we have done extensive experiments. The experiment results show that the novel motion blending method is effective in smoothly blending between two motion sequences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Motion blending, which allows the generation of new motions by interpolation or transition between motion capture sequences, is widely accepted as a standard technique in computer animation. But the transition time and duration is manually chosen in traditional blending approaches and all frames of motion clips must be computed to select the right transition point. This paper presents a new motion blending method that can reduce computing work by extracting the key frames for selecting the transition point and can automatically select the transition time. To evaluate the effectiveness of the improved method, we have done extensive experiments. The experiment results show that the novel motion blending method is effective in smoothly blending between two motion sequences.",
"fno": "27540626",
"keywords": [
"Computer Animation",
"Interpolation",
"Pattern Clustering",
"Motion Blending Approach",
"Unsupervised Clustering",
"Motion Capture Sequences",
"Computer Animation",
"Motion Clips",
"Motion Sequences",
"Animation",
"Humans",
"Physics Computing",
"Educational Institutions",
"Mathematics",
"Interpolation",
"Games",
"Clustering Methods",
"Matched Filters",
"Kernel"
],
"authors": [
{
"affiliation": "Sch. of Comput. Eng. & Sci., ShanghaiUniversity, Shanghai",
"fullName": "Zhongyu Chen",
"givenName": "Zhongyu",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Comput. Eng. & Sci., ShanghaiUniversity, Shanghai",
"fullName": "Xiangbin Zhu",
"givenName": "Xiangbin",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icat",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-11-01T00:00:00",
"pubType": "proceedings",
"pages": "626-631",
"year": "2006",
"issn": null,
"isbn": "0-7695-2754-X",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04089315",
"articleId": "17D45WK5AoU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "27540576",
"articleId": "12OmNyv7m5P",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dmdcm/2011/4413/0/4413a150",
"title": "Fast Computation of Transition Points for Motion Graph",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a150/12OmNAHmOuI",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2012/4789/0/4789a530",
"title": "Data-Driven Based Interactive Motion Blending",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2012/4789a530/12OmNAY79d9",
"parentPublication": {
"id": "proceedings/iccis/2012/4789/0",
"title": "2012 Fourth International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2011/4501/0/4501a955",
"title": "GPU-based Motion Blending for Motion Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2011/4501a955/12OmNBcShTc",
"parentPublication": {
"id": "proceedings/iccis/2011/4501/0",
"title": "2011 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/2001/7237/0/00982378",
"title": "Constrained framespace interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2001/00982378/12OmNyFCvP3",
"parentPublication": {
"id": "proceedings/ca/2001/7237/0",
"title": "Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004a864",
"title": "Bivariate Symmetry Associated Continued Fractions Blending Rational Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a864/12OmNzh5z3R",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/02/mcg2015020070",
"title": "A Practical Model for Live Speech-Driven Lip-Sync",
"doi": null,
"abstractUrl": "/magazine/cg/2015/02/mcg2015020070/13rRUwInvLY",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2003/04/v0481",
"title": "Semantic Representation and Correspondence for State-Based Motion Transition",
"doi": null,
"abstractUrl": "/journal/tg/2003/04/v0481/13rRUxOdD2u",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/03/ttg2008030707",
"title": "Two-Character Motion Analysis and Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2008/03/ttg2008030707/13rRUxYrbM8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1995/02/mcg1995020044",
"title": "Shape Blending Using the Star-Skeleton Representation",
"doi": null,
"abstractUrl": "/magazine/cg/1995/02/mcg1995020044/13rRUygT7Ag",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/08/09314221",
"title": "Deep Sketch-Guided Cartoon Video Inbetweening",
"doi": null,
"abstractUrl": "/journal/tg/2022/08/09314221/1q8UaZOmTsc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmLs4NuZAQ",
"doi": "10.1109/ICCV48922.2021.01115",
"title": "Learning Motion Priors for 4D Human Body Capture in 3D Scenes",
"normalizedTitle": "Learning Motion Priors for 4D Human Body Capture in 3D Scenes",
"abstract": "Recovering high-quality 3D human motion in complex scenes from monocular videos is important for many applications, ranging from AR/VR to robotics. However, capturing realistic human-scene interactions, while dealing with occlusions and partial views, is challenging; current approaches are still far from achieving compelling results. We address this problem by proposing LEMO: LEarning human MOtion priors for 4D human body capture. By leveraging the large-scale motion capture dataset AMASS [38], we introduce a novel motion smoothness prior, which strongly reduces the jitters exhibited by poses recovered over a sequence. Furthermore, to handle contacts and occlusions occurring frequently in body-scene interactions, we design a contact friction term and a contact-aware motion infiller obtained via per-instance self-supervised training. To prove the effectiveness of the proposed motion priors, we combine them into a novel pipeline for 4D human body capture in 3D scenes. With our pipeline, we demonstrate high-quality 4D human body capture, reconstructing smooth motions and physically plausible body-scene interactions. The code and data are available at https://sanweiliti.github.io/LEMO/LEMO.html.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recovering high-quality 3D human motion in complex scenes from monocular videos is important for many applications, ranging from AR/VR to robotics. However, capturing realistic human-scene interactions, while dealing with occlusions and partial views, is challenging; current approaches are still far from achieving compelling results. We address this problem by proposing LEMO: LEarning human MOtion priors for 4D human body capture. By leveraging the large-scale motion capture dataset AMASS [38], we introduce a novel motion smoothness prior, which strongly reduces the jitters exhibited by poses recovered over a sequence. Furthermore, to handle contacts and occlusions occurring frequently in body-scene interactions, we design a contact friction term and a contact-aware motion infiller obtained via per-instance self-supervised training. To prove the effectiveness of the proposed motion priors, we combine them into a novel pipeline for 4D human body capture in 3D scenes. With our pipeline, we demonstrate high-quality 4D human body capture, reconstructing smooth motions and physically plausible body-scene interactions. The code and data are available at https://sanweiliti.github.io/LEMO/LEMO.html.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recovering high-quality 3D human motion in complex scenes from monocular videos is important for many applications, ranging from AR/VR to robotics. However, capturing realistic human-scene interactions, while dealing with occlusions and partial views, is challenging; current approaches are still far from achieving compelling results. We address this problem by proposing LEMO: LEarning human MOtion priors for 4D human body capture. By leveraging the large-scale motion capture dataset AMASS [38], we introduce a novel motion smoothness prior, which strongly reduces the jitters exhibited by poses recovered over a sequence. Furthermore, to handle contacts and occlusions occurring frequently in body-scene interactions, we design a contact friction term and a contact-aware motion infiller obtained via per-instance self-supervised training. To prove the effectiveness of the proposed motion priors, we combine them into a novel pipeline for 4D human body capture in 3D scenes. With our pipeline, we demonstrate high-quality 4D human body capture, reconstructing smooth motions and physically plausible body-scene interactions. The code and data are available at https://sanweiliti.github.io/LEMO/LEMO.html.",
"fno": "281200l1323",
"keywords": [
"Training",
"Three Dimensional Displays",
"Friction",
"Pipelines",
"Dynamics",
"Jitter",
"Distance Measurement"
],
"authors": [
{
"affiliation": "ETH Zürich",
"fullName": "Siwei Zhang",
"givenName": "Siwei",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich",
"fullName": "Yan Zhang",
"givenName": "Yan",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft",
"fullName": "Federica Bogo",
"givenName": "Federica",
"surname": "Bogo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich",
"fullName": "Marc Pollefeys",
"givenName": "Marc",
"surname": "Pollefeys",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich",
"fullName": "Siyu Tang",
"givenName": "Siyu",
"surname": "Tang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "11323-11333",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200l1313",
"articleId": "1BmHnC3QE5q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200l1334",
"articleId": "1BmEHBnofwQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2007/1016/0/04284846",
"title": "Model-Based Markerless Human Body Motion Capture using Multiple Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284846/12OmNvmXJ37",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom/2009/3304/0/04912827",
"title": "Ubiquitous human body motion capture using micro-sensors",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2009/04912827/12OmNyO8tWJ",
"parentPublication": {
"id": "proceedings/percom/2009/3304/0",
"title": "2009 IEEE International Conference on Pervasive Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kam/2009/3888/3/3888c098",
"title": "Modeling of Human Body for Animation by Micro-sensor Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/kam/2009/3888c098/12OmNzRZq1u",
"parentPublication": {
"id": "proceedings/kam/2009/3888/1",
"title": "Knowledge Acquisition and Modeling, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2008/3494/2/3494b489",
"title": "Motion Capture Based on Body Features",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2008/3494b489/12OmNzzP5JD",
"parentPublication": {
"id": "proceedings/isise/2008/3494/2",
"title": "2008 International Symposium on Information Science and Engineering (ISISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050762",
"title": "Interactive Animation of 4D Performance Capture",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050762/13rRUxOve9H",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08708933",
"title": "UnstructuredFusion: Realtime 4D Geometry and Texture Reconstruction Using Commercial RGBD Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08708933/19Q3hT6JyUg",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600t9333",
"title": "H4D: Human 4D Modeling by Learning Neural Compositional Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600t9333/1H0LzpvsrTi",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g782",
"title": "HSC4D: Human-centered 4D Scene Capture in Large-scale Indoor-outdoor Space Using Wearable IMUs and LiDAR",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g782/1H0N79ME2Wc",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0957",
"title": "Monocular Total Capture: Posing Face, Body, and Hands in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0957/1gys3pjGQfe",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a930",
"title": "4D Human Body Capture from Egocentric Video via 3D Scene Grounding",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a930/1zWEi72pRpC",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAkEU4f",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAoUTkt",
"doi": "10.1109/ICME.2011.6011912",
"title": "Motion synthesis for synchronizing with streaming music by segment-based search on metadata motion graphs",
"normalizedTitle": "Motion synthesis for synchronizing with streaming music by segment-based search on metadata motion graphs",
"abstract": "Music and dance are two major forms of entertainment in our daily life. Moreover, the fact that people dance to music suggests the possibility of synchronizing human motion with music. In this paper, we present a novel system to automatically synthesize human motion that is synchronized with streaming music using both rhythm and intensity features. In our system, a motion capture database is re-organized into a novel graph-based representation with metadata (called metadata motion graphs) beforehand, which is specially designed for the streaming application. When receiving a certain amount of music data as a segment, our system will search a best path for the segment on a metadata motion graph. This approach, whose effectiveness is demonstrated in a user study, can compose motions segment by segment, which (1) are synchronized with the music at a beat level in a short enough period, (2) are connected seamlessly with the previous segment, and (3) have the necessary synchronization capacity for the remaining music no matter how long it is.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Music and dance are two major forms of entertainment in our daily life. Moreover, the fact that people dance to music suggests the possibility of synchronizing human motion with music. In this paper, we present a novel system to automatically synthesize human motion that is synchronized with streaming music using both rhythm and intensity features. In our system, a motion capture database is re-organized into a novel graph-based representation with metadata (called metadata motion graphs) beforehand, which is specially designed for the streaming application. When receiving a certain amount of music data as a segment, our system will search a best path for the segment on a metadata motion graph. This approach, whose effectiveness is demonstrated in a user study, can compose motions segment by segment, which (1) are synchronized with the music at a beat level in a short enough period, (2) are connected seamlessly with the previous segment, and (3) have the necessary synchronization capacity for the remaining music no matter how long it is.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Music and dance are two major forms of entertainment in our daily life. Moreover, the fact that people dance to music suggests the possibility of synchronizing human motion with music. In this paper, we present a novel system to automatically synthesize human motion that is synchronized with streaming music using both rhythm and intensity features. In our system, a motion capture database is re-organized into a novel graph-based representation with metadata (called metadata motion graphs) beforehand, which is specially designed for the streaming application. When receiving a certain amount of music data as a segment, our system will search a best path for the segment on a metadata motion graph. This approach, whose effectiveness is demonstrated in a user study, can compose motions segment by segment, which (1) are synchronized with the music at a beat level in a short enough period, (2) are connected seamlessly with the previous segment, and (3) have the necessary synchronization capacity for the remaining music no matter how long it is.",
"fno": "06011912",
"keywords": [
"Synchronization",
"Motion Segmentation",
"Bidirectional Control",
"Databases",
"Rhythm",
"Animation",
"Humans",
"Motion Capture",
"Music Synchronization",
"Motion Synthesis",
"Numerical Optimization"
],
"authors": [
{
"affiliation": "Media Solutions Laboratory, KDDI R&D Laboratories Inc., Japan",
"fullName": "Jianfeng Xu",
"givenName": "Jianfeng",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Media Solutions Laboratory, KDDI R&D Laboratories Inc., Japan",
"fullName": "Koichi Takagi",
"givenName": "Koichi",
"surname": "Takagi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Media Solutions Laboratory, KDDI R&D Laboratories Inc., Japan",
"fullName": "Shigeyuki Sakazawa",
"givenName": "Shigeyuki",
"surname": "Sakazawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2011",
"issn": "1945-7871",
"isbn": "978-1-61284-348-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06011911",
"articleId": "12OmNwBBqgn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06011913",
"articleId": "12OmNBJw9Uk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/culture-computing/2013/5047/0/5047a155",
"title": "Resynchronize Japanese \"Geisha\" Dance Video Using Music of Different Styles",
"doi": null,
"abstractUrl": "/proceedings-article/culture-computing/2013/5047a155/12OmNB0nWdt",
"parentPublication": {
"id": "proceedings/culture-computing/2013/5047/0",
"title": "2013 International Conference on Culture and Computing (Culture Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icetet/2009/3884/0/pid979960",
"title": "Gesture Based Music Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icetet/2009/pid979960/12OmNrJRPfy",
"parentPublication": {
"id": "proceedings/icetet/2009/3884/0",
"title": "Emerging Trends in Engineering & Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b892",
"title": "Temporal Segmentation and Seamless Stitching of Motion Patterns for Synthesizing Novel Animations of Periodic Dances",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b892/12OmNvDZEQ3",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2011/0774/0/05959594",
"title": "Sensing Dance Engagement for Collaborative Music Control",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2011/05959594/12OmNwwuDTT",
"parentPublication": {
"id": "proceedings/iswc/2011/0774/0",
"title": "2011 15th Annual International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/03/ttg2012030501",
"title": "Example-Based Automatic Music-Driven Conventional Dance Motion Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2012/03/ttg2012030501/13rRUwwaKt6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3381",
"title": "AI Choreographer: Music Conditioned 3D Dance Generation with AIST++",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3381/1BmJ1TiWSB2",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09745335",
"title": "Rhythm is a Dancer: Music-Driven Motion Synthesis with Global Structure",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09745335/1CagHUR61pe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10018173",
"title": "Keyframe Control of Music-driven 3D Dance Generation",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10018173/1JYZ6TXyjgk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isaiam/2021/3260/0/326000a055",
"title": "AutoDance: Music Driven Dance Generation",
"doi": null,
"abstractUrl": "/proceedings-article/isaiam/2021/326000a055/1wiQVBNgFhe",
"parentPublication": {
"id": "proceedings/isaiam/2021/3260/0",
"title": "2021 International Symposium on Artificial Intelligence and its Application on Media (ISAIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a348",
"title": "Dance to Music: Generative Choreography with Music using Mixture Density Networks",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a348/1xPslGYA8Gk",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAkEU3p",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "1",
"displayVolume": "1",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBInLkR",
"doi": "10.1109/CVPR.2004.1315121",
"title": "Modeling complex motion by tracking and editing hidden Markov graphs",
"normalizedTitle": "Modeling complex motion by tracking and editing hidden Markov graphs",
"abstract": "We propose a generative model for representing complex motion, such as wavy river, dancing fire and dangling cloth. Our generative method consists of four components: (1) A photometric model using primal sketch[8] which transfers an image into an attribute graph representation. Each vertex of the graph is a scaled and oriented image patch selected from a dictionary. The graph connects and aligns these patches. (2) A geometric model which characterizes the deformation of the attribute graph. (3) A dynamic model, which specifies the motion dynamics of these vertices (patches) and their interactions in the form of coupled Markov chains. (4) A topological model, which interprets the graph topological changes over time. We learn this generative model by a stochastic gradient algorithm implemented by Markov Chain Monte Carlo (MCMC) sampling. This method is shown to be effective in handling the topological changes of graphs. The correctness of the learned model is verified by the low-dimension reconstruction of the original image as well as by the realistic motion sequences it synthesized.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a generative model for representing complex motion, such as wavy river, dancing fire and dangling cloth. Our generative method consists of four components: (1) A photometric model using primal sketch[8] which transfers an image into an attribute graph representation. Each vertex of the graph is a scaled and oriented image patch selected from a dictionary. The graph connects and aligns these patches. (2) A geometric model which characterizes the deformation of the attribute graph. (3) A dynamic model, which specifies the motion dynamics of these vertices (patches) and their interactions in the form of coupled Markov chains. (4) A topological model, which interprets the graph topological changes over time. We learn this generative model by a stochastic gradient algorithm implemented by Markov Chain Monte Carlo (MCMC) sampling. This method is shown to be effective in handling the topological changes of graphs. The correctness of the learned model is verified by the low-dimension reconstruction of the original image as well as by the realistic motion sequences it synthesized.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a generative model for representing complex motion, such as wavy river, dancing fire and dangling cloth. Our generative method consists of four components: (1) A photometric model using primal sketch[8] which transfers an image into an attribute graph representation. Each vertex of the graph is a scaled and oriented image patch selected from a dictionary. The graph connects and aligns these patches. (2) A geometric model which characterizes the deformation of the attribute graph. (3) A dynamic model, which specifies the motion dynamics of these vertices (patches) and their interactions in the form of coupled Markov chains. (4) A topological model, which interprets the graph topological changes over time. We learn this generative model by a stochastic gradient algorithm implemented by Markov Chain Monte Carlo (MCMC) sampling. This method is shown to be effective in handling the topological changes of graphs. The correctness of the learned model is verified by the low-dimension reconstruction of the original image as well as by the realistic motion sequences it synthesized.",
"fno": "01315121",
"keywords": [
"Image Motion Analysis",
"Image Representation",
"Image Reconstruction",
"Image Sequences",
"Graph Theory",
"Gradient Methods",
"Sampling Methods",
"Hidden Markov Models",
"Monte Carlo Methods",
"Complex Motion Representation",
"Hidden Markov Graphs",
"Generative Model",
"Photometric Model",
"Attribute Graph Representation",
"Geometric Model",
"Dynamic Model",
"Motion Dynamics",
"Coupled Markov Chains",
"Topological Model",
"Stochastic Gradient Algorithm",
"Markov Chain Monte Carlo Sampling",
"Low Dimension Image Reconstruction",
"Realistic Motion Sequences",
"Learning Model",
"Hidden Markov Models",
"Tracking",
"Solid Modeling",
"Deformable Models",
"Rivers",
"Fires",
"Photometry",
"Dictionaries",
"Stochastic Processes",
"Monte Carlo Methods"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci., California Univ., Los Angeles, CA, USA",
"fullName": "Yizhou Wang",
"givenName": null,
"surname": "Yizhou Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Song Chun Zhu",
"givenName": null,
"surname": "Song Chun Zhu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-01-01T00:00:00",
"pubType": "proceedings",
"pages": "I-856-I-863 Vol.1",
"year": "2004",
"issn": "1063-6919",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01315120",
"articleId": "12OmNvTTcjA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01315122",
"articleId": "12OmNvq5jBs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2009/4442/0/05457666",
"title": "Learning mixed-state Markov models for statistical motion texture tracking",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457666/12OmNBVrjp9",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asiajcis/2012/4776/0/06298142",
"title": "Attack Sequence Detection in Cloud Using Hidden Markov Model",
"doi": null,
"abstractUrl": "/proceedings-article/asiajcis/2012/06298142/12OmNvT2pdj",
"parentPublication": {
"id": "proceedings/asiajcis/2012/4776/0",
"title": "2012 Seventh Asia Joint Conference on Information Security (ASIA JCIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/3/01394555",
"title": "Statistical motion characterization for video content classification",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394555/12OmNweBUKW",
"parentPublication": {
"id": "proceedings/icme/2004/8603/3",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a193",
"title": "Synthesis and Editing of Human Motion with Generative Human Motion Model",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a193/12OmNyYm2oB",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cimca/2005/2504/2/250420450",
"title": "Human Motion Recognition through Fuzzy Hidden Markov Model",
"doi": null,
"abstractUrl": "/proceedings-article/cimca/2005/250420450/12OmNzkMlNJ",
"parentPublication": {
"id": "proceedings/cimca/2005/2504/1",
"title": "Computational Intelligence for Modelling, Control and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a616",
"title": "A Simplification Algorithm for Visualizing the Structure of Complex Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a616/12OmNzsJ7xO",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2016/07/07425225",
"title": "Aspect-Level Influence Discovery from Graphs",
"doi": null,
"abstractUrl": "/journal/tk/2016/07/07425225/13rRUy3xY8z",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2022/0883/0/088300a272",
"title": "Evaluating Complex Queries on Streaming Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2022/088300a272/1FwFcN6Iy0o",
"parentPublication": {
"id": "proceedings/icde/2022/0883/0",
"title": "2022 IEEE 38th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a725",
"title": "Self-supervised Deformation Modeling for Facial Expression Editing",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a725/1kecITvBymY",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyfdOIi",
"title": "2011 International Conference on Computational and Information Sciences",
"acronym": "iccis",
"groupId": "1800262",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBcShTc",
"doi": "10.1109/ICCIS.2011.158",
"title": "GPU-based Motion Blending for Motion Graphs",
"normalizedTitle": "GPU-based Motion Blending for Motion Graphs",
"abstract": "Character animation, bringing virtual characters to life, plays an important role in modern filmmaking, video games and virtual reality simulations. Recently, the motion graph has been widely used in the motion synthesis of character animations. An effective motion graph should take into account both the diversity of motions and the time cost of graph construction and search. However, motion graph approach becomes time consuming as graph size increases. Due to the fact that the most time consuming step in motion synthesis is the blending operations, which builds the whole motion graph. Sometimes it takes hours to build a graph. A compromise between visual quality and performance needs to be found. But the efficient use of modern programmable graphics hardware offers potential to increase both visual quality and performance. A major aspect lies in the architecture of modern graphics hardware, known as graphics processing units (GPUs). In this paper, we propose the GPU-based motion blending algorithm for motion graphs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Character animation, bringing virtual characters to life, plays an important role in modern filmmaking, video games and virtual reality simulations. Recently, the motion graph has been widely used in the motion synthesis of character animations. An effective motion graph should take into account both the diversity of motions and the time cost of graph construction and search. However, motion graph approach becomes time consuming as graph size increases. Due to the fact that the most time consuming step in motion synthesis is the blending operations, which builds the whole motion graph. Sometimes it takes hours to build a graph. A compromise between visual quality and performance needs to be found. But the efficient use of modern programmable graphics hardware offers potential to increase both visual quality and performance. A major aspect lies in the architecture of modern graphics hardware, known as graphics processing units (GPUs). In this paper, we propose the GPU-based motion blending algorithm for motion graphs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Character animation, bringing virtual characters to life, plays an important role in modern filmmaking, video games and virtual reality simulations. Recently, the motion graph has been widely used in the motion synthesis of character animations. An effective motion graph should take into account both the diversity of motions and the time cost of graph construction and search. However, motion graph approach becomes time consuming as graph size increases. Due to the fact that the most time consuming step in motion synthesis is the blending operations, which builds the whole motion graph. Sometimes it takes hours to build a graph. A compromise between visual quality and performance needs to be found. But the efficient use of modern programmable graphics hardware offers potential to increase both visual quality and performance. A major aspect lies in the architecture of modern graphics hardware, known as graphics processing units (GPUs). In this paper, we propose the GPU-based motion blending algorithm for motion graphs.",
"fno": "4501a955",
"keywords": [
"Character Animation",
"Motion Graphs",
"Graphics Processing Units",
"Motion Blending",
"Motion Capture"
],
"authors": [
{
"affiliation": null,
"fullName": "Xiang Li",
"givenName": "Xiang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiao Lin",
"givenName": "Xiao",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yan Gao",
"givenName": "Yan",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bin Sheng",
"givenName": "Bin",
"surname": "Sheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lizhuang Ma",
"givenName": "Lizhuang",
"surname": "Ma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-10-01T00:00:00",
"pubType": "proceedings",
"pages": "955-959",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4501-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4501a951",
"articleId": "12OmNzUxO5b",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4501a960",
"articleId": "12OmNwNwzIK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dmdcm/2011/4413/0/4413a150",
"title": "Fast Computation of Transition Points for Motion Graph",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a150/12OmNAHmOuI",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2012/4789/0/4789a530",
"title": "Data-Driven Based Interactive Motion Blending",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2012/4789a530/12OmNAY79d9",
"parentPublication": {
"id": "proceedings/iccis/2012/4789/0",
"title": "2012 Fourth International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2010/4215/0/4215a046",
"title": "Building Hand Motion-Based Character Animation: The Case of Puppetry",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2010/4215a046/12OmNAle6A0",
"parentPublication": {
"id": "proceedings/cw/2010/4215/0",
"title": "2010 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2011/4602/0/4602a070",
"title": "Motion Data Retrieval from Very Large Motion Databases",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2011/4602a070/12OmNBA9oAc",
"parentPublication": {
"id": "proceedings/icvrv/2011/4602/0",
"title": "2011 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770841",
"title": "Control of Motion in Character Animation",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770841/12OmNCfSqGT",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icctd/2009/3892/2/3892b435",
"title": "Motion Graph for Character Animation: Design Considerations",
"doi": null,
"abstractUrl": "/proceedings-article/icctd/2009/3892b435/12OmNrGb2kv",
"parentPublication": {
"id": "proceedings/icctd/2009/3892/2",
"title": "Computer Technology and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759447",
"title": "Continual surface-based multi-projector blending for moving objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759447/12OmNvStcQS",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770415",
"title": "Blending Solids with Approximate Analytical Solution to PDE",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770415/12OmNwFicTE",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2002/1784/0/17840145",
"title": "Blending Multiple Views",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840145/12OmNzDehbe",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2006/2754/0/27540626",
"title": "A Motion Blending Approach Based on Unsupervised Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2006/27540626/12OmNzzfTlW",
"parentPublication": {
"id": "proceedings/icat/2006/2754/0",
"title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyNQSG2",
"title": "2012 17th International Conference on Computer Games (CGAMES)",
"acronym": "cgames",
"groupId": "1800470",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvH7fjV",
"doi": "10.1109/CGames.2012.6314551",
"title": "Real-time motion editing for reaching tasks using multiple internal graphs",
"normalizedTitle": "Real-time motion editing for reaching tasks using multiple internal graphs",
"abstract": "In this paper we present a motion editing technique for designing animation based on a database of reference postures and the construction of multiple internal graphs in realtime. These internal graphs are generated for each body part that we wish to define in the animation. These graphs drive the body parts that are pre-defined by the user in order to design realistic movements during the animation process. The proposed solution is developed for reaching postures where the locomotion of the character isn't updated.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we present a motion editing technique for designing animation based on a database of reference postures and the construction of multiple internal graphs in realtime. These internal graphs are generated for each body part that we wish to define in the animation. These graphs drive the body parts that are pre-defined by the user in order to design realistic movements during the animation process. The proposed solution is developed for reaching postures where the locomotion of the character isn't updated.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we present a motion editing technique for designing animation based on a database of reference postures and the construction of multiple internal graphs in realtime. These internal graphs are generated for each body part that we wish to define in the animation. These graphs drive the body parts that are pre-defined by the user in order to design realistic movements during the animation process. The proposed solution is developed for reaching postures where the locomotion of the character isn't updated.",
"fno": "S2002",
"keywords": [
"Animation",
"Databases",
"Games",
"Real Time Systems",
"Joints",
"Wrapping",
"Algorithm Design And Analysis",
"Reaching",
"Motion Editing",
"Internal Graphs"
],
"authors": [
{
"affiliation": "Dept. of Informatics University of Sussex, Brighton, England",
"fullName": "Christos Mousas",
"givenName": "Christos",
"surname": "Mousas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Informatics, University of Sussex, Brighton, England",
"fullName": "Paul Newbury",
"givenName": "Paul",
"surname": "Newbury",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cgames",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-07-01T00:00:00",
"pubType": "proceedings",
"pages": "51-55",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-1120-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "S2001",
"articleId": "12OmNwtn3wV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "S2003",
"articleId": "12OmNwNwzED",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2011/0039/0/05759459",
"title": "AR aided implant templating for unilateral fracture reduction and internal fixation surgery",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759459/12OmNCcKQDV",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492801",
"title": "Internal and external scene graphs: a new approach for flexible distributed render engines",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492801/12OmNy5hRfj",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfcse/2011/1562/0/06041689",
"title": "The Affection of Posture of Tai Chi Exercise to Internal Force and Muscular Torque of Knee and Ankle",
"doi": null,
"abstractUrl": "/proceedings-article/icfcse/2011/06041689/12OmNzahcdO",
"parentPublication": {
"id": "proceedings/icfcse/2011/1562/0",
"title": "2011 International Conference on Future Computer Science and Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/06/09334430",
"title": "Symbiotic Graph Neural Networks for 3D Skeleton-Based Human Action Recognition and Motion Prediction",
"doi": null,
"abstractUrl": "/journal/tp/2022/06/09334430/1qB7sQdkASI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzzxuy8",
"title": "2013 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvq5jDZ",
"doi": "10.1109/CW.2013.54",
"title": "Multi-Touch Interface for Character Motion Control Using Model-Based Approach",
"normalizedTitle": "Multi-Touch Interface for Character Motion Control Using Model-Based Approach",
"abstract": "In this paper, we propose a new method for interactive motion control with a multi-touch interface. A user of our system can touch and drag character's body parts to control its motion. The character's full body motion is driven by our interactive motion control model based on the movement of a few body parts which are directly manipulated by the user via the multi-touch interface. We propose a method for determining 3-dimensional positions of controlled body parts from 2-dimensional touch inputs based on the character's local coordinates and drag speed. We introduce a point-based pose representation which consists of the positions or orientations of a small number of primary body parts. Based on the representation, we develop a motion control model that includes modules for tracking, balance, inter-body interaction, relaxing and self-collision avoidance. The character's pose is reconstructed from the point-based pose representation. We present our experimental results to show that our framework can realize various natural-looking motions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a new method for interactive motion control with a multi-touch interface. A user of our system can touch and drag character's body parts to control its motion. The character's full body motion is driven by our interactive motion control model based on the movement of a few body parts which are directly manipulated by the user via the multi-touch interface. We propose a method for determining 3-dimensional positions of controlled body parts from 2-dimensional touch inputs based on the character's local coordinates and drag speed. We introduce a point-based pose representation which consists of the positions or orientations of a small number of primary body parts. Based on the representation, we develop a motion control model that includes modules for tracking, balance, inter-body interaction, relaxing and self-collision avoidance. The character's pose is reconstructed from the point-based pose representation. We present our experimental results to show that our framework can realize various natural-looking motions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a new method for interactive motion control with a multi-touch interface. A user of our system can touch and drag character's body parts to control its motion. The character's full body motion is driven by our interactive motion control model based on the movement of a few body parts which are directly manipulated by the user via the multi-touch interface. We propose a method for determining 3-dimensional positions of controlled body parts from 2-dimensional touch inputs based on the character's local coordinates and drag speed. We introduce a point-based pose representation which consists of the positions or orientations of a small number of primary body parts. Based on the representation, we develop a motion control model that includes modules for tracking, balance, inter-body interaction, relaxing and self-collision avoidance. The character's pose is reconstructed from the point-based pose representation. We present our experimental results to show that our framework can realize various natural-looking motions.",
"fno": "2246a330",
"keywords": [
"Joints",
"Foot",
"Pelvis",
"Motion Control",
"Cameras",
"Vectors",
"Computational Modeling",
"Character Animation",
"Motion Control",
"Multi Touch Interface",
"Computer Animation"
],
"authors": [
{
"affiliation": "Kyushu Inst. of Technol., Fukuoka, Japan",
"fullName": "Masaki Oshita",
"givenName": "Masaki",
"surname": "Oshita",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-10-01T00:00:00",
"pubType": "proceedings",
"pages": "330-337",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2246-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2246a326",
"articleId": "12OmNvAAtyb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2246a338",
"articleId": "12OmNvAiSG8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dmdcm/2011/4413/0/4413a086",
"title": "A Review of Dynamic Motion Control Considering Physics for Real Time Animation Character",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a086/12OmNBJeyHe",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mnrao/1994/6435/0/00346255",
"title": "Active motion-based segmentation of human body outlines",
"doi": null,
"abstractUrl": "/proceedings-article/mnrao/1994/00346255/12OmNBSBk7z",
"parentPublication": {
"id": "proceedings/mnrao/1994/6435/0",
"title": "Proceedings of 1994 IEEE Workshop on Motion of Non-rigid and Articulated Objects",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770841",
"title": "Control of Motion in Character Animation",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770841/12OmNCfSqGT",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2012/1120/0/S2002",
"title": "Real-time motion editing for reaching tasks using multiple internal graphs",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2012/S2002/12OmNvH7fjV",
"parentPublication": {
"id": "proceedings/cgames/2012/1120/0",
"title": "2012 17th International Conference on Computer Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012201",
"title": "Touch and motion (tactile sensor)",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012201/12OmNvy2586",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761222",
"title": "Quantitative analysis of Iaido proficiency by using motion data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761222/12OmNx2QUKw",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446498",
"title": "Performance-Driven Dance Motion Control of a Virtual Partner Character",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446498/13bd1fdV4lU",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050774",
"title": "Analyzing Locomotion Synthesis with Feature-Based Motion Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050774/13rRUygT7sE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102964",
"title": "An Automatic Framework For Generating Labanotation Scores From Continuous Motion Capture Data",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102964/1kwrdFJpdtu",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h104",
"title": "MultiBodySync: Multi-Body Segmentation and Motion Estimation via 3D Scan Synchronization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h104/1yeLOCxBw5y",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyugyQd",
"title": "Image and Video Technology, Pacific-Rim Symposium on",
"acronym": "psivt",
"groupId": "1800241",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx5Yvb6",
"doi": "10.1109/PSIVT.2010.88",
"title": "Sophisticated Construction and Search of 2D Motion Graphs for Synthesizing Videos",
"normalizedTitle": "Sophisticated Construction and Search of 2D Motion Graphs for Synthesizing Videos",
"abstract": "This paper presents an intuitive method for synthesizing videos by directly manipulating video objects without using 3D models. The proposed method extracts a video object from each video frame and creates locally consistent video sequences using a 2D motion graph, where its node corresponds to the extracted video object and its edge represents a motion transition between a pair of nodes. Our primary contribution lies in a sophisticated construction of the 2D motion graph using shape matching techniques, and its search that allows us to intuitively synthesize a new video sequence by manipulating feature points extracted from the video objects through the 2D screen space. The method further employs a deformation technique to interpolate between video objects with relatively different shapes, and thus can increase available motion transitions by inserting intervening video objects into the 2D motion graph. Several examples have been generated to demonstrate that this approach can create the user-intended motions of the video objects easily by clicking and dragging the feature points.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an intuitive method for synthesizing videos by directly manipulating video objects without using 3D models. The proposed method extracts a video object from each video frame and creates locally consistent video sequences using a 2D motion graph, where its node corresponds to the extracted video object and its edge represents a motion transition between a pair of nodes. Our primary contribution lies in a sophisticated construction of the 2D motion graph using shape matching techniques, and its search that allows us to intuitively synthesize a new video sequence by manipulating feature points extracted from the video objects through the 2D screen space. The method further employs a deformation technique to interpolate between video objects with relatively different shapes, and thus can increase available motion transitions by inserting intervening video objects into the 2D motion graph. Several examples have been generated to demonstrate that this approach can create the user-intended motions of the video objects easily by clicking and dragging the feature points.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an intuitive method for synthesizing videos by directly manipulating video objects without using 3D models. The proposed method extracts a video object from each video frame and creates locally consistent video sequences using a 2D motion graph, where its node corresponds to the extracted video object and its edge represents a motion transition between a pair of nodes. Our primary contribution lies in a sophisticated construction of the 2D motion graph using shape matching techniques, and its search that allows us to intuitively synthesize a new video sequence by manipulating feature points extracted from the video objects through the 2D screen space. The method further employs a deformation technique to interpolate between video objects with relatively different shapes, and thus can increase available motion transitions by inserting intervening video objects into the 2D motion graph. Several examples have been generated to demonstrate that this approach can create the user-intended motions of the video objects easily by clicking and dragging the feature points.",
"fno": "4285a487",
"keywords": [
"2 D Motion Graphs",
"Video Synthesis",
"Feature Point Tracking"
],
"authors": [
{
"affiliation": null,
"fullName": "Jun Kobayashi",
"givenName": "Jun",
"surname": "Kobayashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chongke Bi",
"givenName": "Chongke",
"surname": "Bi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shigeo Takahashi",
"givenName": "Shigeo",
"surname": "Takahashi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "psivt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-11-01T00:00:00",
"pubType": "proceedings",
"pages": "487-494",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4285-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4285a481",
"articleId": "12OmNrIJqzy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4285a495",
"articleId": "12OmNyLiuxg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a146",
"title": "A 2D-3D Hybrid Approach to Video Stabilization",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a146/12OmNAq3huO",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583347",
"title": "A 2D to 3D conversion scheme based on depth cues analysis for MPEG videos",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583347/12OmNBQ2W2b",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bic-ta/2011/4514/0/4514a290",
"title": "P Systems with 2D Picture Grammars",
"doi": null,
"abstractUrl": "/proceedings-article/bic-ta/2011/4514a290/12OmNC8uRhN",
"parentPublication": {
"id": "proceedings/bic-ta/2011/4514/0",
"title": "Bio-Inspired Computing: Theories and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761470",
"title": "Synthesizing 3D videos by a motion-conditioned background mosaic",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761470/12OmNC8uRjD",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wvlsid/2000/0534/0/05340067",
"title": "Low Power VLSI Architecture for 2D-Mesh Video Object Motion Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/wvlsid/2000/05340067/12OmNqBKUbV",
"parentPublication": {
"id": "proceedings/wvlsid/2000/0534/0",
"title": "VLSI, IEEE Computer Society Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d963",
"title": "Real-Time 3D Face and Facial Action Tracking Using Extended 2D+3D AAMs",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d963/12OmNroijel",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2006/9753/0/04042348",
"title": "Video Tracking Of 2D Face Motion During Speech",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2006/04042348/12OmNvCzFd9",
"parentPublication": {
"id": "proceedings/isspit/2006/9753/0",
"title": "2006 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995650",
"title": "3D motion reconstruction for real-world camera motion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995650/12OmNvTTccO",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420549",
"title": "3D-2D projective registration of free-form curves and surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420549/12OmNwD1q2N",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1996/08/i0814",
"title": "Compact Representations of Videos Through Dominant and Multiple Motion Estimation",
"doi": null,
"abstractUrl": "/journal/tp/1996/08/i0814/13rRUyY28Zj",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrkjVbR",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyQYt2c",
"doi": "10.1109/ICME.2013.6607506",
"title": "Motion synthesis for affective agents using piecewise principal component regression",
"normalizedTitle": "Motion synthesis for affective agents using piecewise principal component regression",
"abstract": "An affective style of human motion is essential for human computer interaction using embodied conversational agents. Motion synthesis for affective agents generates a skeletal motion in a particular affective style (briefly called affective motion in this paper) from an input neutral motion. This appeals to the user but is very challenging due to the well-known fact that a skeletal motion is a high-dimensional and non-linear signal. We solve this problem by using regression analysis to estimate the relationship between neutral motions and affective motions, adopting principal component regression (PCR) to deal with the high-dimensional motion signal for the first time. Furthermore, we propose a novel method called piecewise principal component regression (PPCR) to deal with the non-linear problem, in which the motion signal is automatically divided into several segments and PCR is performed on each segment. Our experimental results demonstrate that the proposed PPCR method is successful in generating affective motion within high quality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An affective style of human motion is essential for human computer interaction using embodied conversational agents. Motion synthesis for affective agents generates a skeletal motion in a particular affective style (briefly called affective motion in this paper) from an input neutral motion. This appeals to the user but is very challenging due to the well-known fact that a skeletal motion is a high-dimensional and non-linear signal. We solve this problem by using regression analysis to estimate the relationship between neutral motions and affective motions, adopting principal component regression (PCR) to deal with the high-dimensional motion signal for the first time. Furthermore, we propose a novel method called piecewise principal component regression (PPCR) to deal with the non-linear problem, in which the motion signal is automatically divided into several segments and PCR is performed on each segment. Our experimental results demonstrate that the proposed PPCR method is successful in generating affective motion within high quality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An affective style of human motion is essential for human computer interaction using embodied conversational agents. Motion synthesis for affective agents generates a skeletal motion in a particular affective style (briefly called affective motion in this paper) from an input neutral motion. This appeals to the user but is very challenging due to the well-known fact that a skeletal motion is a high-dimensional and non-linear signal. We solve this problem by using regression analysis to estimate the relationship between neutral motions and affective motions, adopting principal component regression (PCR) to deal with the high-dimensional motion signal for the first time. Furthermore, we propose a novel method called piecewise principal component regression (PPCR) to deal with the non-linear problem, in which the motion signal is automatically divided into several segments and PCR is performed on each segment. Our experimental results demonstrate that the proposed PPCR method is successful in generating affective motion within high quality.",
"fno": "06607506",
"keywords": [
"Motion Segmentation",
"Joints",
"Principal Component Analysis",
"Estimation",
"Training Data",
"Human Computer Interaction",
"Legged Locomotion",
"Piecewise Regression",
"Motion Synthesis",
"Affective Computing",
"Principal Component Regression"
],
"authors": [
{
"affiliation": "Media and HTML5 Application Laboratory, KDDI R&D Laboratories, Inc., 2-1-15 Ohara, Fujimino-shi, Saitama, 356-8502 Japan",
"fullName": "Jianfeng Xu",
"givenName": "Jianfeng",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Media and HTML5 Application Laboratory, KDDI R&D Laboratories, Inc., 2-1-15 Ohara, Fujimino-shi, Saitama, 356-8502 Japan",
"fullName": "Emi Myodo",
"givenName": "Emi",
"surname": "Myodo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Media and HTML5 Application Laboratory, KDDI R&D Laboratories, Inc., 2-1-15 Ohara, Fujimino-shi, Saitama, 356-8502 Japan",
"fullName": "Shigeyuki Sakazawa",
"givenName": "Shigeyuki",
"surname": "Sakazawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2013",
"issn": "1945-7871",
"isbn": "978-1-4799-0015-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06607505",
"articleId": "12OmNAgoV92",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06607507",
"articleId": "12OmNzayNtr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457d558",
"title": "Surface Motion Capture Transfer with Gaussian Process Regression",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d558/12OmNA0vnYh",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349438",
"title": "A comparison of PCA, KPCA and LDA for feature extraction to recognize affect in gait kinematics",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349438/12OmNAWpyxp",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceet/2009/3819/2/3819b745",
"title": "Simultaneous Spectrophotometric Determination of Copper, Zinc, Nickel and Cobalt in Water Using Principal Component Regression Coupled with Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/iceet/2009/3819b745/12OmNwE9OwR",
"parentPublication": {
"id": "proceedings/iceet/2009/3819/2",
"title": "Energy and Environment Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2013/3211/0/3211a765",
"title": "Analysis and Modelling of Affective Japanese Sitting Postures",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2013/3211a765/12OmNy1SFNV",
"parentPublication": {
"id": "proceedings/sitis/2013/3211/0",
"title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995618",
"title": "Principal regression analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995618/12OmNzWfoQ0",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmv/2009/3944/0/3944a177",
"title": "Improved Principal Components Regression with Rough Set and its Application in the Modeling of Warship LCC",
"doi": null,
"abstractUrl": "/proceedings-article/icmv/2009/3944a177/12OmNzXWZIE",
"parentPublication": {
"id": "proceedings/icmv/2009/3944/0",
"title": "Machine Vision, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a091",
"title": "Analysis and Modelling of Affective Japanese Sitting Postures by Japanese and British Observers",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a091/12OmNzlUKxa",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom/2018/3224/0/08444576",
"title": "Regression-based, mistake-driven movement skill estimation in Nordic Walking using wearable inertial sensors",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2018/08444576/13bd1gFCjrj",
"parentPublication": {
"id": "proceedings/percom/2018/3224/0",
"title": "2018 IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/02/08985257",
"title": "Affective Dynamics: Principal Motion Analysis of Temporal Dominance of Sensations and Emotions Data",
"doi": null,
"abstractUrl": "/journal/ta/2022/02/08985257/1hcy75Z9fpe",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089573",
"title": "Effects of Interacting with a Crowd of Emotional Virtual Humans on Users’ Affective and Non-Verbal Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089573/1jIxfPwklig",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvmowTg",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"acronym": "vv",
"groupId": "1000808",
"volume": "0",
"displayVolume": "0",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyqRn5T",
"doi": "10.1109/SWG.2002.1226510",
"title": "Haptics-Based Volumetric Modeling Using Dynamic Spline-Based Implicit Functions",
"normalizedTitle": "Haptics-Based Volumetric Modeling Using Dynamic Spline-Based Implicit Functions",
"abstract": "This paper systematically presents a novel haptics-based volumetric modeling framework, which is founded upon volumetric implicit functions and powerful physics-based modeling. The volumetric implicit functions incorporate hierarchical B-splines, CSG-based functional composition, and knot insertion to facilitate multiresolution editing and level of details (LODs) control. Our dynamic volumes are semi-algebraic sets of implicit functions and are governed by the principle of dynamics, hence responding to sculpting forces in a natural and predictive manner. The versatility of our volumetric modeling affords users to easily modify both the geometry and the topology of modeled objects, while the inherent physical properties can offer an intuitive mechanism for direct manipulation. Moreover, we augment our modeling environment with a natural haptic interface, in order to take advantage of the additional realism associated with 3D haptic interaction. Coupling physics and haptics with implicit functions can realize all the potentials exhibited by volumetric modeling, physics-based modeling, and haptic interface. Furthermore, in order to directly manipulate existing volumetric datasets as well as point clouds, we develop a hierarchical fitting algorithm to reconstruct and represent discrete datasets using our continuous implicit functions, which permit users to further design and edit those 3D models in real-time using a large variety of haptic toolkits and visualize their interactive deformation at arbitrary resolution.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper systematically presents a novel haptics-based volumetric modeling framework, which is founded upon volumetric implicit functions and powerful physics-based modeling. The volumetric implicit functions incorporate hierarchical B-splines, CSG-based functional composition, and knot insertion to facilitate multiresolution editing and level of details (LODs) control. Our dynamic volumes are semi-algebraic sets of implicit functions and are governed by the principle of dynamics, hence responding to sculpting forces in a natural and predictive manner. The versatility of our volumetric modeling affords users to easily modify both the geometry and the topology of modeled objects, while the inherent physical properties can offer an intuitive mechanism for direct manipulation. Moreover, we augment our modeling environment with a natural haptic interface, in order to take advantage of the additional realism associated with 3D haptic interaction. Coupling physics and haptics with implicit functions can realize all the potentials exhibited by volumetric modeling, physics-based modeling, and haptic interface. Furthermore, in order to directly manipulate existing volumetric datasets as well as point clouds, we develop a hierarchical fitting algorithm to reconstruct and represent discrete datasets using our continuous implicit functions, which permit users to further design and edit those 3D models in real-time using a large variety of haptic toolkits and visualize their interactive deformation at arbitrary resolution.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper systematically presents a novel haptics-based volumetric modeling framework, which is founded upon volumetric implicit functions and powerful physics-based modeling. The volumetric implicit functions incorporate hierarchical B-splines, CSG-based functional composition, and knot insertion to facilitate multiresolution editing and level of details (LODs) control. Our dynamic volumes are semi-algebraic sets of implicit functions and are governed by the principle of dynamics, hence responding to sculpting forces in a natural and predictive manner. The versatility of our volumetric modeling affords users to easily modify both the geometry and the topology of modeled objects, while the inherent physical properties can offer an intuitive mechanism for direct manipulation. Moreover, we augment our modeling environment with a natural haptic interface, in order to take advantage of the additional realism associated with 3D haptic interaction. Coupling physics and haptics with implicit functions can realize all the potentials exhibited by volumetric modeling, physics-based modeling, and haptic interface. Furthermore, in order to directly manipulate existing volumetric datasets as well as point clouds, we develop a hierarchical fitting algorithm to reconstruct and represent discrete datasets using our continuous implicit functions, which permit users to further design and edit those 3D models in real-time using a large variety of haptic toolkits and visualize their interactive deformation at arbitrary resolution.",
"fno": "76410055",
"keywords": [
"Computer Graphics",
"Computational Geometry And Object Modeling",
"Physically Based Modeling",
"Methodology And Techniques",
"Interaction Techniques",
"Information Interfaces And Presentation",
"User Interfaces",
"Haptic I O",
"Implicit Function"
],
"authors": [
{
"affiliation": "State University of New York at Stony Brook",
"fullName": "Jing Hua",
"givenName": "Jing",
"surname": "Hua",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State University of New York at Stony Brook",
"fullName": "Hong Qin",
"givenName": "Hong",
"surname": "Qin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-10-01T00:00:00",
"pubType": "proceedings",
"pages": "55-64",
"year": "2002",
"issn": null,
"isbn": "0-7803-7641-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "76410045",
"articleId": "12OmNzvhvCC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "76410065",
"articleId": "12OmNyGbIf6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2008/2005/0/04479964",
"title": "Voxel-Based Haptic Rendering Using Implicit Sphere Trees",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479964/12OmNBQkwYz",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2016/2303/0/2303a025",
"title": "Detail-Preserving 3D Shape Modeling from Raw Volumetric Dataset via Hessian-Constrained Local Implicit Surfaces Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2016/2303a025/12OmNCbU38Z",
"parentPublication": {
"id": "proceedings/cw/2016/2303/0",
"title": "2016 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smi/2010/7259/0/05521459",
"title": "Ridge Extraction from Isosurfaces of Volumetric Data Using Implicit B-Splines",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2010/05521459/12OmNxE2n2x",
"parentPublication": {
"id": "proceedings/smi/2010/7259/0",
"title": "Shape Modeling International (SMI 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smi/2002/1546/0/15460119",
"title": "Dynamic Implicit Solids with Constraints for Haptic Sculpting",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2002/15460119/12OmNzRqdGH",
"parentPublication": {
"id": "proceedings/smi/2002/1546/0",
"title": "Proceedings SMI. Shape Modeling International 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2001/1227/0/12270254",
"title": "Haptic Sculpting of Volumetric Implicit Functions",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2001/12270254/12OmNzV70HP",
"parentPublication": {
"id": "proceedings/pg/2001/1227/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/09/ttg2013091539",
"title": "Surface Mesh to Volumetric Spline Conversion with Generalized Polycubes",
"doi": null,
"abstractUrl": "/journal/tg/2013/09/ttg2013091539/13rRUEgarsH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2006/02/01561185",
"title": "Global segmentation and curvature analysis of volumetric data sets using trivariate B-spline functions",
"doi": null,
"abstractUrl": "/journal/tp/2006/02/01561185/13rRUwjoNyc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/02/v0109",
"title": "Interactive Direct Rendering of Trivariate B-Spline Scalar Functions",
"doi": null,
"abstractUrl": "/journal/tg/2001/02/v0109/13rRUx0geff",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/05/v0574",
"title": "Haptics-Based Dynamic Implicit Solid Modeling",
"doi": null,
"abstractUrl": "/journal/tg/2004/05/v0574/13rRUx0gepT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/04/07182778",
"title": "Volume Haptics with Topology-Consistent Isosurfaces",
"doi": null,
"abstractUrl": "/journal/th/2015/04/07182778/13rRUypp57K",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmGNVHFEZi",
"doi": "10.1109/ICCV48922.2021.01174",
"title": "Building-GAN: Graph-Conditioned Architectural Volumetric Design Generation",
"normalizedTitle": "Building-GAN: Graph-Conditioned Architectural Volumetric Design Generation",
"abstract": "Volumetric design is the first and critical step for professional building design, where architects not only depict the rough 3D geometry of the building but also specify the programs to form a 2D layout on each floor. Though 2D layout generation for a single story has been widely studied, there is no developed method for multi-story buildings. This paper focuses on volumetric design generation conditioned on an input program graph. Instead of outputting dense 3D voxels, we propose a new 3D representation named voxel graph that is both compact and expressive for building geometries. Our generator is a cross-modal graph neural network that uses a pointer mechanism to connect the input program graph and the output voxel graph, and the whole pipeline is trained using the adversarial framework. The generated designs are evaluated qualitatively by a user study and quantitatively using three metrics: quality, diversity, and connectivity accuracy. We show that our model generates realistic 3D volumetric designs and outperforms previous methods and baselines.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Volumetric design is the first and critical step for professional building design, where architects not only depict the rough 3D geometry of the building but also specify the programs to form a 2D layout on each floor. Though 2D layout generation for a single story has been widely studied, there is no developed method for multi-story buildings. This paper focuses on volumetric design generation conditioned on an input program graph. Instead of outputting dense 3D voxels, we propose a new 3D representation named voxel graph that is both compact and expressive for building geometries. Our generator is a cross-modal graph neural network that uses a pointer mechanism to connect the input program graph and the output voxel graph, and the whole pipeline is trained using the adversarial framework. The generated designs are evaluated qualitatively by a user study and quantitatively using three metrics: quality, diversity, and connectivity accuracy. We show that our model generates realistic 3D volumetric designs and outperforms previous methods and baselines.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Volumetric design is the first and critical step for professional building design, where architects not only depict the rough 3D geometry of the building but also specify the programs to form a 2D layout on each floor. Though 2D layout generation for a single story has been widely studied, there is no developed method for multi-story buildings. This paper focuses on volumetric design generation conditioned on an input program graph. Instead of outputting dense 3D voxels, we propose a new 3D representation named voxel graph that is both compact and expressive for building geometries. Our generator is a cross-modal graph neural network that uses a pointer mechanism to connect the input program graph and the output voxel graph, and the whole pipeline is trained using the adversarial framework. The generated designs are evaluated qualitatively by a user study and quantitatively using three metrics: quality, diversity, and connectivity accuracy. We show that our model generates realistic 3D volumetric designs and outperforms previous methods and baselines.",
"fno": "281200l1936",
"keywords": [
"Geometry",
"Measurement",
"Solid Modeling",
"Three Dimensional Displays",
"Buildings",
"Pipelines",
"Layout",
"Vision Applications And Systems",
"3 D From A Single Image And Shape From X",
"Adversarial Learning",
"Image And Video Synthesis",
"Vision Other Modalities"
],
"authors": [
{
"affiliation": "Autodesk Research,United States",
"fullName": "Kai-Hung Chang",
"givenName": "Kai-Hung",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Autodesk Research,United States",
"fullName": "Chin-Yi Cheng",
"givenName": "Chin-Yi",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Autodesk Research,United States",
"fullName": "Jieliang Luo",
"givenName": "Jieliang",
"surname": "Luo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Obayashi AI Design Lab,Japan",
"fullName": "Shingo Murata",
"givenName": "Shingo",
"surname": "Murata",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Autodesk Research,United States",
"fullName": "Mehdi Nourbakhsh",
"givenName": "Mehdi",
"surname": "Nourbakhsh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Obayashi AI Design Lab,Japan",
"fullName": "Yoshito Tsuji",
"givenName": "Yoshito",
"surname": "Tsuji",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "11936-11945",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200l1926",
"articleId": "1BmFJGrFu2A",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200l1946",
"articleId": "1BmGAs5Iju0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2008/1971/0/04480772",
"title": "New Rendering Approach for Composable Volumetric Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480772/12OmNBAqZId",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459406",
"title": "Multiperspective stereo matching and volumetric reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459406/12OmNy2JsZs",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a190",
"title": "Semantic Scene Completion from a Single Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a190/12OmNzn38Ky",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165673",
"title": "Very High Frame Rate Volumetric Integration of Depth Images on Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165673/13rRUxNEqPX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g123",
"title": "Neural Volumetric Object Selection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g123/1H1liGT3jaM",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956267",
"title": "Robust 3D rotation invariant local binary pattern for volumetric texture classification",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956267/1IHp8KyzlN6",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a197",
"title": "HVTR: Hybrid Volumetric-Textural Rendering for Human Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a197/1KYsovTRTC8",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900p5735",
"title": "DECOR-GAN: 3D Shape Detailization by Conditional Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900p5735/1yeK92zTzgs",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h115",
"title": "NeuTex: Neural Texture Mapping for Volumetric Neural Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h115/1yeLdyIKnV6",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a700",
"title": "3DVNet: Multi-View Depth Prediction and Volumetric Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a700/1zWEh9peydi",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1itCwY51e",
"doi": "10.1109/CVPR52688.2022.00606",
"title": "NeuralHOFusion: Neural Volumetric Rendering under Human-object Interactions",
"normalizedTitle": "NeuralHOFusion: Neural Volumetric Rendering under Human-object Interactions",
"abstract": "4D modeling of human-object interactions is critical for numerous applications. However, efficient volumetric capture and rendering of complex interaction scenarios, especially from sparse inputs, remain challenging. In this paper, we propose NeuralHOFusion, a neural approach for volumetric human-object capture and rendering using sparse consumer RGBD sensors. It marries traditional non-rigid fusion with recent neural implicit modeling and blending advances, where the captured humans and objects are layer-wise disentangled. For geometry modeling, we propose a neural implicit inference scheme with non-rigid key-volume fusion, as well as a template-aid robust object tracking pipeline. Our scheme enables detailed and complete geometry generation under complex interactions and occlusions. Moreover, we introduce a layer-wise human-object texture rendering scheme, which combines volumetric and image-based rendering in both spatial and temporal domains to obtain photo-realistic results. Extensive experiments demonstrate the effectiveness and efficiency of our approach in synthesizing photo-realistic free-view results under complex human-object interactions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "4D modeling of human-object interactions is critical for numerous applications. However, efficient volumetric capture and rendering of complex interaction scenarios, especially from sparse inputs, remain challenging. In this paper, we propose NeuralHOFusion, a neural approach for volumetric human-object capture and rendering using sparse consumer RGBD sensors. It marries traditional non-rigid fusion with recent neural implicit modeling and blending advances, where the captured humans and objects are layer-wise disentangled. For geometry modeling, we propose a neural implicit inference scheme with non-rigid key-volume fusion, as well as a template-aid robust object tracking pipeline. Our scheme enables detailed and complete geometry generation under complex interactions and occlusions. Moreover, we introduce a layer-wise human-object texture rendering scheme, which combines volumetric and image-based rendering in both spatial and temporal domains to obtain photo-realistic results. Extensive experiments demonstrate the effectiveness and efficiency of our approach in synthesizing photo-realistic free-view results under complex human-object interactions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "4D modeling of human-object interactions is critical for numerous applications. However, efficient volumetric capture and rendering of complex interaction scenarios, especially from sparse inputs, remain challenging. In this paper, we propose NeuralHOFusion, a neural approach for volumetric human-object capture and rendering using sparse consumer RGBD sensors. It marries traditional non-rigid fusion with recent neural implicit modeling and blending advances, where the captured humans and objects are layer-wise disentangled. For geometry modeling, we propose a neural implicit inference scheme with non-rigid key-volume fusion, as well as a template-aid robust object tracking pipeline. Our scheme enables detailed and complete geometry generation under complex interactions and occlusions. Moreover, we introduce a layer-wise human-object texture rendering scheme, which combines volumetric and image-based rendering in both spatial and temporal domains to obtain photo-realistic results. Extensive experiments demonstrate the effectiveness and efficiency of our approach in synthesizing photo-realistic free-view results under complex human-object interactions.",
"fno": "694600g145",
"keywords": [
"Data Visualisation",
"Image Colour Analysis",
"Image Motion Analysis",
"Image Reconstruction",
"Image Registration",
"Image Sensors",
"Image Sequences",
"Image Texture",
"Medical Image Processing",
"Object Tracking",
"Rendering Computer Graphics",
"Layer Wise Human Object Texture Rendering Scheme",
"Volumetric Image Based Rendering",
"Human Object Interactions",
"Neural HO Fusion",
"Neural Volumetric Rendering",
"Efficient Volumetric Capture",
"Complex Interaction Scenarios",
"Neural Approach",
"Human Object Capture",
"Sparse Consumer RGBD Sensors",
"Nonrigid Fusion",
"Recent Neural Implicit Modeling",
"Blending Advances",
"Captured Humans",
"Neural Implicit Inference Scheme",
"Nonrigid Key Volume Fusion",
"Template Aid Robust Object Tracking Pipeline",
"Complex Interactions",
"Occlusions",
"Geometry",
"Computer Vision",
"Three Dimensional Displays",
"Telepresence",
"Face Recognition",
"Pose Estimation",
"Pipelines"
],
"authors": [
{
"affiliation": "ShanghaiTech University",
"fullName": "Yuheng Jiang",
"givenName": "Yuheng",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University",
"fullName": "Suyi Jiang",
"givenName": "Suyi",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University",
"fullName": "Guoxing Sun",
"givenName": "Guoxing",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tencent",
"fullName": "Zhuo Su",
"givenName": "Zhuo",
"surname": "Su",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Meta Reasearch Lab",
"fullName": "Kaiwen Guo",
"givenName": "Kaiwen",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KU leuven",
"fullName": "Minye Wu",
"givenName": "Minye",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University",
"fullName": "Jingyi Yu",
"givenName": "Jingyi",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University",
"fullName": "Lan Xu",
"givenName": "Lan",
"surname": "Xu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "6145-6155",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H1itx04DTi",
"name": "pcvpr202269460-09879859s1-mm_694600g145.zip",
"size": "6.43 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879859s1-mm_694600g145.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600g133",
"articleId": "1H0OvjmcWUE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600g156",
"articleId": "1H1hG0ozucU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2008/1971/0/04480772",
"title": "New Rendering Approach for Composable Volumetric Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480772/12OmNBAqZId",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/02/ttg2008020263",
"title": "Haptic Rendering of Dynamic Volumetric Data",
"doi": null,
"abstractUrl": "/journal/tg/2008/02/ttg2008020263/13rRUIIVlkb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/01/v0055",
"title": "Modeling, Animating, and Rendering Complex Scenes Using Volumetric Textures",
"doi": null,
"abstractUrl": "/journal/tg/1998/01/v0055/13rRUxly95q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1990/02/mcg1990020024",
"title": "Volumetric Rendering",
"doi": null,
"abstractUrl": "/magazine/cg/1990/02/mcg1990020024/13rRUy08Myt",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5914",
"title": "BEHAVE: Dataset and Method for Tracking Human Object Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5914/1H1lSo99gTC",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09925090",
"title": "RobustFusion: Robust Volumetric Performance Reconstruction Under Human-Object Interactions From Monocular RGBD Stream",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09925090/1HBHXf7iQZG",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a197",
"title": "HVTR: Hybrid Volumetric-Textural Rendering for Human Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a197/1KYsovTRTC8",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1728",
"title": "Pixel-aligned Volumetric Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1728/1yeHX163Xnq",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900g222",
"title": "NeuralHumanFVV: Real-Time Neural Volumetric Human Performance Rendering using RGB Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900g222/1yeIMelAx8s",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h115",
"title": "NeuTex: Neural Texture Mapping for Volumetric Neural Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h115/1yeLdyIKnV6",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnX0LxdiuI",
"doi": "10.1109/VRW52623.2021.00181",
"title": "Dynamic Projection Mapping with 3D Images Using Volumetric Display",
"normalizedTitle": "Dynamic Projection Mapping with 3D Images Using Volumetric Display",
"abstract": "Recently, projection mapping has become popular that it is used in various entertainments. In the research field, dynamic projection mapping has been studied to change the appearance of moving and deforming objects. Although it seamlessly connects the real world and the virtual world, complex equipment surrounds the object to achieve advanced projection and it is an unnecessarily visually disturbing factor. Furthermore, projection images are occluded when the target object is grasped and manipulated by hand. In this study, we propose a novel dynamic projection mapping with invisible projection devices with 3D images using a volumetric display and retro-transmissive optics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recently, projection mapping has become popular that it is used in various entertainments. In the research field, dynamic projection mapping has been studied to change the appearance of moving and deforming objects. Although it seamlessly connects the real world and the virtual world, complex equipment surrounds the object to achieve advanced projection and it is an unnecessarily visually disturbing factor. Furthermore, projection images are occluded when the target object is grasped and manipulated by hand. In this study, we propose a novel dynamic projection mapping with invisible projection devices with 3D images using a volumetric display and retro-transmissive optics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recently, projection mapping has become popular that it is used in various entertainments. In the research field, dynamic projection mapping has been studied to change the appearance of moving and deforming objects. Although it seamlessly connects the real world and the virtual world, complex equipment surrounds the object to achieve advanced projection and it is an unnecessarily visually disturbing factor. Furthermore, projection images are occluded when the target object is grasped and manipulated by hand. In this study, we propose a novel dynamic projection mapping with invisible projection devices with 3D images using a volumetric display and retro-transmissive optics.",
"fno": "405700a597",
"keywords": [
"Augmented Reality",
"Three Dimensional Displays",
"Virtual Reality",
"Dynamic Projection Mapping",
"Volumetric Display",
"Advanced Projection",
"Projection Images",
"Invisible Projection Devices",
"Three Dimensional Displays",
"Image Resolution",
"Conferences",
"Brightness",
"Entertainment Industry",
"User Interfaces",
"Real Time Systems",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "The University of Electro-Communications",
"fullName": "Masumi Kiyokawa",
"givenName": "Masumi",
"surname": "Kiyokawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Electro-Communications",
"fullName": "Naoki Hashimoto",
"givenName": "Naoki",
"surname": "Hashimoto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "597-598",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tnX05SIL5K",
"name": "pvrw202140570-09419229s1-mm_405700a597.zip",
"size": "122 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419229s1-mm_405700a597.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "405700a595",
"articleId": "1tnX0QoWTLi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a599",
"articleId": "1tnXiU5GF9K",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iiaiaai/2014/4174/0/06913421",
"title": "Development of Projection Mapping with Utility of Digital Signage",
"doi": null,
"abstractUrl": "/proceedings-article/iiaiaai/2014/06913421/12OmNAnMuFg",
"parentPublication": {
"id": "proceedings/iiaiaai/2014/4174/0",
"title": "2014 IIAI 3rd International Conference on Advanced Applied Informatics (IIAIAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270471",
"title": "Inter-Reflection Compensation for Immersive Projection Display",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270471/12OmNwpGgIc",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apcase/2015/7588/0/7588a252",
"title": "Application of Collimated Projection Systems for the Purpose of Driving Simulators",
"doi": null,
"abstractUrl": "/proceedings-article/apcase/2015/7588a252/12OmNyKJixn",
"parentPublication": {
"id": "proceedings/apcase/2015/7588/0",
"title": "2015 Asia-Pacific Conference on Computer Aided System Engineering (APCASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2013/0400/0/06572619",
"title": "Three dimensional projection-type integral imaging display system using directional projection and elemental image resizing method",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572619/12OmNz2kqgu",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802105",
"title": "Geometrically-correct projection-based texture mapping onto a cloth",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802105/12OmNzVXNZG",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a756",
"title": "Robust Tangible Projection Mapping with Multi-View Contour-Based Object Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a756/1CJeF1WYP1m",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873957",
"title": "Content-Aware Brightness Solving and Error Mitigation in Large-Scale Multi-Projection Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873957/1GjwJ0X1ks0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a603",
"title": "Proposal for an aerial display using dynamic projection mapping on a distant flying screen",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a603/1MNgKRrqL6g",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798245",
"title": "Shadowless Projector: Suppressing Shadows in Projection Mapping with Micro Mirror Array Plate",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798245/1cI6ar8DdyE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiea/2021/3265/0/326500a157",
"title": "Research on the Methods of Panoramic Video Projection Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/aiea/2021/326500a157/1wzsE6D2J6U",
"parentPublication": {
"id": "proceedings/aiea/2021/3265/0",
"title": "2021 International Conference on Artificial Intelligence and Electromechanical Automation (AIEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1uqGdWlamUo",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1uqGjzXRQ3e",
"doi": "10.1109/WACV48630.2021.00170",
"title": "Autonomous Tracking For Volumetric Video Sequences",
"normalizedTitle": "Autonomous Tracking For Volumetric Video Sequences",
"abstract": "As a rapidly growing medium, volumetric video is gaining attention beyond academia, reaching industry and creative communities alike. This brings new challenges to reduce the barrier to entry from a technical and economical point of view. We present a system for robustly and autonomously performing temporally coherent tracking for volumetric sequences, specifically targeting those from sparse setups or with noisy output. Our system will detect and recover missing pertinent geometry across highly incoherent sequences as well as provide users the option of propagating drastic topology edits. In this way, affordable multi-view setups can leverage temporal consistency to reduce processing and compression overheads while also generating more aesthetically pleasing volumetric sequences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As a rapidly growing medium, volumetric video is gaining attention beyond academia, reaching industry and creative communities alike. This brings new challenges to reduce the barrier to entry from a technical and economical point of view. We present a system for robustly and autonomously performing temporally coherent tracking for volumetric sequences, specifically targeting those from sparse setups or with noisy output. Our system will detect and recover missing pertinent geometry across highly incoherent sequences as well as provide users the option of propagating drastic topology edits. In this way, affordable multi-view setups can leverage temporal consistency to reduce processing and compression overheads while also generating more aesthetically pleasing volumetric sequences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As a rapidly growing medium, volumetric video is gaining attention beyond academia, reaching industry and creative communities alike. This brings new challenges to reduce the barrier to entry from a technical and economical point of view. We present a system for robustly and autonomously performing temporally coherent tracking for volumetric sequences, specifically targeting those from sparse setups or with noisy output. Our system will detect and recover missing pertinent geometry across highly incoherent sequences as well as provide users the option of propagating drastic topology edits. In this way, affordable multi-view setups can leverage temporal consistency to reduce processing and compression overheads while also generating more aesthetically pleasing volumetric sequences.",
"fno": "047700b659",
"keywords": [
"Data Compression",
"Image Sequences",
"Video Signal Processing",
"Volumetric Video Sequences",
"Rapidly Growing Medium",
"Creative Communities",
"Temporally Coherent Tracking",
"Volumetric Sequences",
"Sparse Setups",
"Noisy Output",
"Highly Incoherent Sequences",
"Affordable Multiview Setups",
"Leverage Temporal Consistency",
"Autonomous Tracking",
"Industries",
"Geometry",
"Computer Vision",
"Target Tracking",
"Conferences",
"Video Sequences",
"Topology"
],
"authors": [
{
"affiliation": "V-SENSE,Trinity College Dublin",
"fullName": "Matthew Moynihan",
"givenName": "Matthew",
"surname": "Moynihan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "V-SENSE,Trinity College Dublin",
"fullName": "Susana Ruano",
"givenName": "Susana",
"surname": "Ruano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Volograms",
"fullName": "Rafael Pagés",
"givenName": "Rafael",
"surname": "Pagés",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "V-SENSE,Trinity College Dublin",
"fullName": "Aljosa Smolic",
"givenName": "Aljosa",
"surname": "Smolic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "1659-1668",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0477-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "047700b649",
"articleId": "1uqGCCrT1Go",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "047700b669",
"articleId": "1uqGJ5bwiXu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgrapi/2016/3568/0/3568a201",
"title": "Detecting Crowd Features in Video Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2016/3568a201/12OmNAoUTj7",
"parentPublication": {
"id": "proceedings/sibgrapi/2016/3568/0",
"title": "2016 29th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/1/01315108",
"title": "Synchronizing video sequences",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315108/12OmNyRg4so",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/1",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2008/3278/0/3278a414",
"title": "Automatic Target Detection and Tracking in FLIR Image Sequences Using Morphological Connected Operator",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2008/3278a414/12OmNzIUfT8",
"parentPublication": {
"id": "proceedings/iih-msp/2008/3278/0",
"title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/3/81833484",
"title": "Adaptive detection for tracking moving biological objects in video microscopy sequences",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81833484/12OmNzdoMWM",
"parentPublication": {
"id": "proceedings/icip/1997/8183/3",
"title": "Proceedings of International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d862",
"title": "Volumetric 3D Tracking by Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d862/12OmNzlUKKP",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dicta/2008/3456/0/3456a257",
"title": "Multi Cue Performance Evaluation Metrics for Tracking in Video Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2008/3456a257/12OmNzlUKOu",
"parentPublication": {
"id": "proceedings/dicta/2008/3456/0",
"title": "2008 Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/3/01394540",
"title": "Small and fast moving object detection and tracking in sports video sequences",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394540/12OmNzy7uO4",
"parentPublication": {
"id": "proceedings/icme/2004/8603/3",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200k0276",
"title": "Video Annotation for Visual Tracking via Selection and Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200k0276/1BmL3n9GIfe",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icctec/2017/5784/0/578400b187",
"title": "Inter-frame Correlation Based on Moving Vehicle Target Detection in Infrared Image Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/icctec/2017/578400b187/1ckrX2atCnu",
"parentPublication": {
"id": "proceedings/icctec/2017/5784/0",
"title": "2017 International Conference on Computer Technology, Electronics and Communication (ICCTEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f742",
"title": "Function4D: Real-time Human Volumetric Capture from Very Sparse Consumer RGBD Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f742/1yeJFKObhAY",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1yeHGyRsuys",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeLdyIKnV6",
"doi": "10.1109/CVPR46437.2021.00704",
"title": "NeuTex: Neural Texture Mapping for Volumetric Neural Rendering",
"normalizedTitle": "NeuTex: Neural Texture Mapping for Volumetric Neural Rendering",
"abstract": "Recent work [28], [5] has demonstrated that volumetric scene representations combined with differentiable volume rendering can enable photo-realistic rendering for challenging scenes that mesh reconstruction fails on. However, these methods entangle geometry and appearance in a \"black-box\" volume that cannot be edited. Instead, we present an approach that explicitly disentangles geometry—represented as a continuous 3D volume—from appearance—represented as a continuous 2D texture map. We achieve this by introducing a 3D-to-2D texture mapping (or surface parameterization) network into volumetric representations. We constrain this texture mapping network using an additional 2D-to-3D inverse mapping network and a novel cycle consistency loss to make 3D surface points map to 2D texture points that map back to the original 3D points. We demonstrate that this representation can be reconstructed using only multi-view image supervision and generates high-quality rendering results. More importantly, by separating geometry and texture, we allow users to edit appearance by simply editing 2D texture maps.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent work [28], [5] has demonstrated that volumetric scene representations combined with differentiable volume rendering can enable photo-realistic rendering for challenging scenes that mesh reconstruction fails on. However, these methods entangle geometry and appearance in a \"black-box\" volume that cannot be edited. Instead, we present an approach that explicitly disentangles geometry—represented as a continuous 3D volume—from appearance—represented as a continuous 2D texture map. We achieve this by introducing a 3D-to-2D texture mapping (or surface parameterization) network into volumetric representations. We constrain this texture mapping network using an additional 2D-to-3D inverse mapping network and a novel cycle consistency loss to make 3D surface points map to 2D texture points that map back to the original 3D points. We demonstrate that this representation can be reconstructed using only multi-view image supervision and generates high-quality rendering results. More importantly, by separating geometry and texture, we allow users to edit appearance by simply editing 2D texture maps.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent work [28], [5] has demonstrated that volumetric scene representations combined with differentiable volume rendering can enable photo-realistic rendering for challenging scenes that mesh reconstruction fails on. However, these methods entangle geometry and appearance in a \"black-box\" volume that cannot be edited. Instead, we present an approach that explicitly disentangles geometry—represented as a continuous 3D volume—from appearance—represented as a continuous 2D texture map. We achieve this by introducing a 3D-to-2D texture mapping (or surface parameterization) network into volumetric representations. We constrain this texture mapping network using an additional 2D-to-3D inverse mapping network and a novel cycle consistency loss to make 3D surface points map to 2D texture points that map back to the original 3D points. We demonstrate that this representation can be reconstructed using only multi-view image supervision and generates high-quality rendering results. More importantly, by separating geometry and texture, we allow users to edit appearance by simply editing 2D texture maps.",
"fno": "450900h115",
"keywords": [
"Data Visualisation",
"Geometry",
"Image Reconstruction",
"Image Representation",
"Image Texture",
"Rendering Computer Graphics",
"Solid Modelling",
"Continuous 2 D Texture Map",
"Or Surface Parameterization",
"Volumetric Representations",
"Texture Mapping Network",
"2 D To 3 D Inverse Mapping Network",
"3 D Surface Points Map",
"2 D Texture Points",
"Original 3 D Points",
"High Quality Rendering Results",
"2 D Texture Maps",
"Neural Texture Mapping",
"Volumetric Neural Rendering",
"Volumetric Scene Representations",
"Differentiable Volume Rendering",
"Photo Realistic Rendering",
"Challenging Scenes",
"Mesh Reconstruction",
"Methods Entangle Geometry",
"Black Box Volume",
"Geometry Represented",
"Geometry",
"Surface Reconstruction",
"Solid Modeling",
"Computer Vision",
"Three Dimensional Displays",
"Rendering Computer Graphics",
"Cognition"
],
"authors": [
{
"affiliation": "University of California,San Diego",
"fullName": "Fanbo Xiang",
"givenName": "Fanbo",
"surname": "Xiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Zexiang Xu",
"givenName": "Zexiang",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Miloš Hašan",
"givenName": "Miloš",
"surname": "Hašan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Yannick Hold-Geoffroy",
"givenName": "Yannick",
"surname": "Hold-Geoffroy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Kalyan Sunkavalli",
"givenName": "Kalyan",
"surname": "Sunkavalli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California,San Diego",
"fullName": "Hao Su",
"givenName": "Hao",
"surname": "Su",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "7115-7124",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4509-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeLdqSdUYw",
"name": "pcvpr202145090-09577392s1-mm_450900h115.zip",
"size": "17.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09577392s1-mm_450900h115.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "450900h104",
"articleId": "1yeLOCxBw5y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "450900h125",
"articleId": "1yeI1ICFJTy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2008/1971/0/04480772",
"title": "New Rendering Approach for Composable Volumetric Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480772/12OmNBAqZId",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720359",
"title": "Texture-Based Wireframe Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720359/12OmNwCaCvX",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esiat/2009/3682/2/3682b575",
"title": "Rapid Texture-based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/esiat/2009/3682b575/12OmNx7G5VW",
"parentPublication": {
"id": "esiat/2009/3682/2",
"title": "Environmental Science and Information Application Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1988/9999/1/00754002",
"title": "Rendering Of Texture On 3D Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1988/00754002/12OmNxxNbPS",
"parentPublication": {
"id": "proceedings/acssc/1988/9999/1",
"title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2015/7962/0/7962a001",
"title": "Meta-Relief Texture Mapping with Dynamic Texture-Space Ambient Occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2015/7962a001/12OmNyp9MiX",
"parentPublication": {
"id": "proceedings/sibgrapi/2015/7962/0",
"title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciap/1999/0040/0/00401055",
"title": "Texture Extraction from Photographs and Rendering with Dynamic Texture Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/iciap/1999/00401055/12OmNz61drx",
"parentPublication": {
"id": "proceedings/iciap/1999/0040/0",
"title": "Image Analysis and Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/07/06799298",
"title": "Interactive Mesostructures with Volumetric Collisions",
"doi": null,
"abstractUrl": "/journal/tg/2014/07/06799298/13rRUzp02om",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d501",
"title": "Differentiable Volumetric Rendering: Learning Implicit 3D Representations Without 3D Supervision",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d501/1m3nwXQXEAw",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900g222",
"title": "NeuralHumanFVV: Real-Time Neural Volumetric Human Performance Rendering using RGB Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900g222/1yeIMelAx8s",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/4.509E290",
"title": "Neural Lumigraph Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/4.509E290/1yeKqzS5Igo",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCuDzt9",
"title": "Software Architecture, Working IEEE/IFIP Conference on",
"acronym": "wicsa",
"groupId": "1000680",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAsTgWO",
"doi": "10.1109/WICSA.2008.26",
"title": "On the Quantitative Analysis of Architecture Stability in Aspectual Decompositions",
"normalizedTitle": "On the Quantitative Analysis of Architecture Stability in Aspectual Decompositions",
"abstract": "Architectural aspects are expected to modularize widely-scoped concerns that naturally crosscut the boundaries of system components at the software architecture level. However, there is no empirical knowledge about the positive and negative influences of aspectual decompositions on architecture stability. This paper analyzes the influence exerted by the aspect-oriented composition mechanisms in the stability of crosscutting concerns in an evolving multi-agent software architecture. Our investigation encompassed a comparative analysis of aspectual and non-aspectual decompositions based on different architectural styles. In particular, we assessed various facets of components' and compositions' stability through such alternative designs of the same multi-agent system using conventional quantitative indicators. The evaluation focused upon a number of architecturally-relevant changes that are typically performed through real-life maintenance tasks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Architectural aspects are expected to modularize widely-scoped concerns that naturally crosscut the boundaries of system components at the software architecture level. However, there is no empirical knowledge about the positive and negative influences of aspectual decompositions on architecture stability. This paper analyzes the influence exerted by the aspect-oriented composition mechanisms in the stability of crosscutting concerns in an evolving multi-agent software architecture. Our investigation encompassed a comparative analysis of aspectual and non-aspectual decompositions based on different architectural styles. In particular, we assessed various facets of components' and compositions' stability through such alternative designs of the same multi-agent system using conventional quantitative indicators. The evaluation focused upon a number of architecturally-relevant changes that are typically performed through real-life maintenance tasks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Architectural aspects are expected to modularize widely-scoped concerns that naturally crosscut the boundaries of system components at the software architecture level. However, there is no empirical knowledge about the positive and negative influences of aspectual decompositions on architecture stability. This paper analyzes the influence exerted by the aspect-oriented composition mechanisms in the stability of crosscutting concerns in an evolving multi-agent software architecture. Our investigation encompassed a comparative analysis of aspectual and non-aspectual decompositions based on different architectural styles. In particular, we assessed various facets of components' and compositions' stability through such alternative designs of the same multi-agent system using conventional quantitative indicators. The evaluation focused upon a number of architecturally-relevant changes that are typically performed through real-life maintenance tasks.",
"fno": "3092a029",
"keywords": [
"Multi Agent Systems",
"Object Oriented Programming",
"Program Diagnostics",
"Software Architecture",
"Software Maintenance",
"Software Reliability",
"Software Quantitative Analysis",
"Software Architecture Stability",
"Software Aspectual Decompositions",
"Software Architectural Aspects",
"System Components",
"Aspect Oriented Composition",
"Crosscutting Stability",
"Evolving Multiagent Software Architecture",
"Software Composition Stability",
"Multiagent System",
"Real Life Software Maintenance",
"Stability Analysis",
"Collision Mitigation",
"Software Architecture",
"Computer Architecture",
"Multiagent Systems",
"Software Design",
"Performance Evaluation",
"Pressing",
"Object Oriented Modeling",
"Context Modeling"
],
"authors": [
{
"affiliation": "Università di Bologna, Bologna, Italy",
"fullName": "Ambra Molesini",
"givenName": "Ambra",
"surname": "Molesini",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lancaster University, United Kingdom",
"fullName": "Alessandro Garcia",
"givenName": "Alessandro",
"surname": "Garcia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidade Federal da Bahia, Brazil",
"fullName": "Christina von Flach Garcia Chavez",
"givenName": "Christina von Flach Garcia",
"surname": "Chavez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidade Federal do Rio Grande do Norte, Brazil",
"fullName": "Thais Batista",
"givenName": "Thais",
"surname": "Batista",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wicsa",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-02-01T00:00:00",
"pubType": "proceedings",
"pages": "29-38",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3092-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04459136",
"articleId": "12OmNxZTtIr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3092z001",
"articleId": "12OmNypIYyw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ase/2015/0025/0/0025a886",
"title": "Stability of Self-Adaptive Software Architectures",
"doi": null,
"abstractUrl": "/proceedings-article/ase/2015/0025a886/12OmNC1Gugs",
"parentPublication": {
"id": "proceedings/ase/2015/0025/0",
"title": "2015 30th IEEE/ACM International Conference on Automated Software Engineering (ASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wicsa/2009/4984/0/05290817",
"title": "From retrospect to prospect: Assessing modularity and stability from software architecture",
"doi": null,
"abstractUrl": "/proceedings-article/wicsa/2009/05290817/12OmNqzu6R7",
"parentPublication": {
"id": "proceedings/wicsa/2009/4984/0",
"title": "2009 Joint Working IEEE/IFIP Conference on Software Architecture (WICSA) & 3rd European Conference on Software Architecture (ECSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcre/2013/2931/0/06671317",
"title": "Evaluating architecture stability of software projects",
"doi": null,
"abstractUrl": "/proceedings-article/wcre/2013/06671317/12OmNxETapb",
"parentPublication": {
"id": "proceedings/wcre/2013/2931/0",
"title": "2013 20th Working Conference on Reverse Engineering (WCRE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbcars/2010/8707/0/4259a130",
"title": "Concern-Based Assessment of Architectural Stability: A Comparative Study",
"doi": null,
"abstractUrl": "/proceedings-article/sbcars/2010/4259a130/12OmNyKa5Ym",
"parentPublication": {
"id": "proceedings/sbcars/2010/8707/0",
"title": "2010 Fourth Brazilian Symposium on Software Components, Architectures and Reuse",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2006/2832/0/28320122",
"title": "Aspectual mixin layers: aspects and features in concert",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2006/28320122/12OmNyo1o5z",
"parentPublication": {
"id": "proceedings/icse/2006/2832/0",
"title": "Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2003/7983/0/01227480",
"title": "Evaluating software architectures: development, stability, and evolution",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2003/01227480/12OmNzYeB2b",
"parentPublication": {
"id": "proceedings/aiccsa/2003/7983/0",
"title": "ACS/IEEE International Conference on Computer Systems and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wicsa/2004/2172/0/21720005",
"title": "ASAAM: Aspectual Software Architecture Analysis Method",
"doi": null,
"abstractUrl": "/proceedings-article/wicsa/2004/21720005/12OmNzZEAqq",
"parentPublication": {
"id": "proceedings/wicsa/2004/2172/0",
"title": "Software Architecture, Working IEEE/IFIP Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2012/1536/0/06211148",
"title": "Adding Aspects to Software Architecture",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2012/06211148/12OmNzZWbCc",
"parentPublication": {
"id": "proceedings/icis/2012/1536/0",
"title": "2012 IEEE/ACIS 11th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/conisoft/2018/6577/0/08645866",
"title": "Software Stability: A Systematic Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/conisoft/2018/08645866/17QjJfkRxNC",
"parentPublication": {
"id": "proceedings/conisoft/2018/6577/0",
"title": "2018 6th International Conference in Software Engineering Research and Innovation (CONISOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyKJiaV",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBTs7BJ",
"doi": "10.1109/ICPR.2010.250",
"title": "Modeling and Generalization of Discrete Morse Terrain Decompositions",
"normalizedTitle": "Modeling and Generalization of Discrete Morse Terrain Decompositions",
"abstract": "We address the problem of morphological analysis of real terrains. We describe a morphological model for a terrain by considering extensions of Morse theory to the discrete case. We propose a two-level model of the morphology of a terrain based on a graph joining the critical points of the terrain through integral lines. We present a new set of generalization operators specific for discrete piece-wise linear terrain models, which are used to reduce noise and the size of the morphological representation. We show results of our approach on real terrains.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We address the problem of morphological analysis of real terrains. We describe a morphological model for a terrain by considering extensions of Morse theory to the discrete case. We propose a two-level model of the morphology of a terrain based on a graph joining the critical points of the terrain through integral lines. We present a new set of generalization operators specific for discrete piece-wise linear terrain models, which are used to reduce noise and the size of the morphological representation. We show results of our approach on real terrains.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We address the problem of morphological analysis of real terrains. We describe a morphological model for a terrain by considering extensions of Morse theory to the discrete case. We propose a two-level model of the morphology of a terrain based on a graph joining the critical points of the terrain through integral lines. We present a new set of generalization operators specific for discrete piece-wise linear terrain models, which are used to reduce noise and the size of the morphological representation. We show results of our approach on real terrains.",
"fno": "4109a999",
"keywords": [],
"authors": [
{
"affiliation": null,
"fullName": "Leila De Floriani",
"givenName": "Leila",
"surname": "De Floriani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Paola Magillo",
"givenName": "Paola",
"surname": "Magillo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Maria Vitali",
"givenName": "Maria",
"surname": "Vitali",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "999-1002",
"year": "2010",
"issn": "1051-4651",
"isbn": "978-0-7695-4109-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4109a995",
"articleId": "12OmNBfZSku",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4109b003",
"articleId": "12OmNwnH4ND",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdma/2013/5016/0/5016a224",
"title": "Analysis of Terrain Roughness Based on Statistics",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016a224/12OmNBEpnAQ",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciap/2007/2877/0/28770337",
"title": "Multi-resolution Morse-Smale Complexes for Terrain Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/iciap/2007/28770337/12OmNsbY6Tz",
"parentPublication": {
"id": "proceedings/iciap/2007/2877/0",
"title": "2007 14th International Conference on Image Analysis and Processing - ICIAP 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsa/2008/3243/0/3243a441",
"title": "Multi-Scale 3D Morse Complexes",
"doi": null,
"abstractUrl": "/proceedings-article/iccsa/2008/3243a441/12OmNwFRpbe",
"parentPublication": {
"id": "proceedings/iccsa/2008/3243/0",
"title": "2008 International Conference on Computational Sciences and Its Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2013/4983/0/4983a016",
"title": "Using Gait Change for Terrain Sensing by Robots",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2013/4983a016/12OmNzd7bYc",
"parentPublication": {
"id": "proceedings/crv/2013/4983/0",
"title": "2013 International Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050799",
"title": "Hierarchy of Stable Morse Decompositions",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050799/13rRUB7a111",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/05/v0499",
"title": "Applications of Forman's Discrete Morse Theory to Topology Visualization and Mesh Compression",
"doi": null,
"abstractUrl": "/journal/tg/2004/05/v0499/13rRUwI5TXr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/08/ttp2011081646",
"title": "Theory and Algorithms for Constructing Discrete Morse Complexes from Grayscale Digital Images",
"doi": null,
"abstractUrl": "/journal/tp/2011/08/ttp2011081646/13rRUx0xPoa",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/03/06873268",
"title": "Skeletonization and Partitioning of Digital Images Using Discrete Morse Theory",
"doi": null,
"abstractUrl": "/journal/tp/2015/03/06873268/13rRUx0xPod",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/04/ttg2008040848",
"title": "Efficient Morse Decompositions of Vector Fields",
"doi": null,
"abstractUrl": "/journal/tg/2008/04/ttg2008040848/13rRUxjQyvc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/06/ttg2012060938",
"title": "Robust Morse Decompositions of Piecewise Constant Vector Fields",
"doi": null,
"abstractUrl": "/journal/tg/2012/06/ttg2012060938/13rRUxlgxOj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrMHOdg",
"title": "2017 31st International Conference on Advanced Information Networking and Applications: Workshops (WAINA)",
"acronym": "waina",
"groupId": "1001766",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyyO8H8",
"doi": "10.1109/WAINA.2017.105",
"title": "An Evaluation of Psychological-Competitive Ability for Rugby Players Using the Analytic Hierarchy Process",
"normalizedTitle": "An Evaluation of Psychological-Competitive Ability for Rugby Players Using the Analytic Hierarchy Process",
"abstract": "The Analytic Hierarchy Process (AHP) is the decision-making support method in order to select a solution from alternatives based on a number of evaluation criteria. This method has been also utilized to evaluate athletes' competitive ability regarding their physical, technical, and mental aspects in recent years. Therefore, this study tried to develop an evaluation index of Rugby players' psychological-competitive ability as their mental aspects. At first, a questionnaire survey on sixty-seven male university rugby players was conducted using Diagnostic Inventory of Psychological-Competitive Ability for Athletes (DIPCA.3). Next, an interview survey on a Rugby coach was carried out for ranking five factors of DIPCA.3 depending on each factor's importance. After that, total evaluation values of each Rugby player were calculated from the scores of DIPCA.3 and the ranking of five factors. The Rugby players were divided into high or low group in accordance with their total scores of DIPCA.3 or total evaluation values of AHP. Comparing the high and low group of the total evaluation values, the proportion of Rugby players on a high level, who were regular players and had many experience of participating in national competitions, was larger among the high group than the low group significantly. The results of this study suggested that the total evaluation value calculated with AHP was more effective than the total scores of DIPCA.3 for evaluation of Rugby players' mental aspects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Analytic Hierarchy Process (AHP) is the decision-making support method in order to select a solution from alternatives based on a number of evaluation criteria. This method has been also utilized to evaluate athletes' competitive ability regarding their physical, technical, and mental aspects in recent years. Therefore, this study tried to develop an evaluation index of Rugby players' psychological-competitive ability as their mental aspects. At first, a questionnaire survey on sixty-seven male university rugby players was conducted using Diagnostic Inventory of Psychological-Competitive Ability for Athletes (DIPCA.3). Next, an interview survey on a Rugby coach was carried out for ranking five factors of DIPCA.3 depending on each factor's importance. After that, total evaluation values of each Rugby player were calculated from the scores of DIPCA.3 and the ranking of five factors. The Rugby players were divided into high or low group in accordance with their total scores of DIPCA.3 or total evaluation values of AHP. Comparing the high and low group of the total evaluation values, the proportion of Rugby players on a high level, who were regular players and had many experience of participating in national competitions, was larger among the high group than the low group significantly. The results of this study suggested that the total evaluation value calculated with AHP was more effective than the total scores of DIPCA.3 for evaluation of Rugby players' mental aspects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Analytic Hierarchy Process (AHP) is the decision-making support method in order to select a solution from alternatives based on a number of evaluation criteria. This method has been also utilized to evaluate athletes' competitive ability regarding their physical, technical, and mental aspects in recent years. Therefore, this study tried to develop an evaluation index of Rugby players' psychological-competitive ability as their mental aspects. At first, a questionnaire survey on sixty-seven male university rugby players was conducted using Diagnostic Inventory of Psychological-Competitive Ability for Athletes (DIPCA.3). Next, an interview survey on a Rugby coach was carried out for ranking five factors of DIPCA.3 depending on each factor's importance. After that, total evaluation values of each Rugby player were calculated from the scores of DIPCA.3 and the ranking of five factors. The Rugby players were divided into high or low group in accordance with their total scores of DIPCA.3 or total evaluation values of AHP. Comparing the high and low group of the total evaluation values, the proportion of Rugby players on a high level, who were regular players and had many experience of participating in national competitions, was larger among the high group than the low group significantly. The results of this study suggested that the total evaluation value calculated with AHP was more effective than the total scores of DIPCA.3 for evaluation of Rugby players' mental aspects.",
"fno": "6231a438",
"keywords": [
"Psychology",
"Analytic Hierarchy Process",
"Interviews",
"Games",
"Numerical Stability",
"Stability Criteria",
"Analytic Hierarchy Process",
"Diagnostic Inventory Of Psychological Competitive Ability For Athletes",
"Decision Making Support",
"Rugby Players",
"Interview Survey"
],
"authors": [
{
"affiliation": null,
"fullName": "Koichiro Aoki",
"givenName": "Koichiro",
"surname": "Aoki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Minoru Uehara",
"givenName": "Minoru",
"surname": "Uehara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chieko Kato",
"givenName": "Chieko",
"surname": "Kato",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hiroyuki Hirahara",
"givenName": "Hiroyuki",
"surname": "Hirahara",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "waina",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-03-01T00:00:00",
"pubType": "proceedings",
"pages": "438-442",
"year": "2017",
"issn": null,
"isbn": "978-1-5090-6231-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6231a433",
"articleId": "12OmNzsrwim",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6231a443",
"articleId": "12OmNzXnNCS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icicta/2008/3357/1/3357a941",
"title": "Study on Project Experts' Evaluation Based on Analytic Hierarchy Process and Fuzzy Comprehensive Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2008/3357a941/12OmNA1DMl0",
"parentPublication": {
"id": "proceedings/icicta/2008/3357/1",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceet/2009/3819/1/3819a631",
"title": "Evaluating the Double-Stage Coupled Heat Pump System Using Analytic Hierarchy Process Based on Entropy Weight",
"doi": null,
"abstractUrl": "/proceedings-article/iceet/2009/3819a631/12OmNAGepUA",
"parentPublication": {
"id": "proceedings/iceet/2009/3819/1",
"title": "Energy and Environment Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2017/6231/0/6231a455",
"title": "Relationship between a Rugby Player’s Personality, Length of Experience and Personality and Position",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2017/6231a455/12OmNrkBwna",
"parentPublication": {
"id": "proceedings/waina/2017/6231/0",
"title": "2017 31st International Conference on Advanced Information Networking and Applications: Workshops (WAINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2010/3987/3/3987c449",
"title": "A Study on Teaching Quality Evaluation Based on Analytic Hierarchy Process",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2010/3987c449/12OmNvDZET8",
"parentPublication": {
"id": "proceedings/etcs/2010/3987/3",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/1/3336a411",
"title": "Aeroengine Health Assessment Using a Web-Based Grey Analytic Hierarchy Process",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336a411/12OmNvIfDN9",
"parentPublication": {
"id": "proceedings/csse/2008/3336/1",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsgea/2016/3578/0/07733824",
"title": "Urban Sports Level Evaluation Based on Improved Fuzzy Analytic Hierarchy Process",
"doi": null,
"abstractUrl": "/proceedings-article/icsgea/2016/07733824/12OmNyNQSDz",
"parentPublication": {
"id": "proceedings/icsgea/2016/3578/0",
"title": "2016 International Conference on Smart Grid and Electrical Automation (ICSGEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icim/2009/3911/0/3911a088",
"title": "Comprehensive Evaluation of CDIO Model Teachers' Classroom Teaching Quality Based on Fuzzy Analytic Hierarchy Process",
"doi": null,
"abstractUrl": "/proceedings-article/icim/2009/3911a088/12OmNzlD9h8",
"parentPublication": {
"id": "proceedings/icim/2009/3911/0",
"title": "Innovation Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsmt/2021/2063/0/206300a237",
"title": "Research on the Optimal Strategy of Enterprise Raw Material Supply Chain Based on Analytic Hierarchy Process and Comprehensive Evaluation Method",
"doi": null,
"abstractUrl": "/proceedings-article/iccsmt/2021/206300a237/1E2we6dnig0",
"parentPublication": {
"id": "proceedings/iccsmt/2021/2063/0",
"title": "2021 2nd International Conference on Computer Science and Management Technology (ICCSMT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2020/9919/0/991900a248",
"title": "Cluster analysis of Psychological Characteristics in Elite Level Rugby Players who Belong to Companies",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2020/991900a248/1rqECj48N4A",
"parentPublication": {
"id": "proceedings/candarw/2020/9919/0",
"title": "2020 Eighth International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2020/1969/0/196900a719",
"title": "Research on the Application of Analytic Hierarchy Process in Human Resource Post Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2020/196900a719/1wG61PrhpLO",
"parentPublication": {
"id": "proceedings/icris/2020/1969/0",
"title": "2020 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBtl1Ay",
"title": "2011 IEEE International Conference on Granular Computing",
"acronym": "grc",
"groupId": "1001626",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNA0MZ1l",
"doi": "10.1109/GRC.2011.6122581",
"title": "A new tongue model based on muscle-control",
"normalizedTitle": "A new tongue model based on muscle-control",
"abstract": "Talking head system is an interactive virtual reality technique in human-computer interaction. During the development of talking head systems, the modeling of tongue has received little attention. One important reason is that tongue is an extremely complex organ; therefore it is rather complex to animate. However, tongue is important in realistic simulation of talking head systems. To model tongue realistically with acceptable computing resource, this paper presents a three dimensional muscle-controlled tongue model (MCTM). We adopt the approach from the surgery to classify tongue muscles into intrinsic and extrinsic ones. Two models are built for each kind of muscle. The intrinsic model is employed to control the deformation of tongue. And the extrinsic model is employed to control the holistic tongue movement. When the positions of these muscles are established, we can control tongue model to move side-to-side and change different shapes by using different muscle-shrinkages. The experiments show that this method can simulate typical tongue movements such as front raising and back raising.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Talking head system is an interactive virtual reality technique in human-computer interaction. During the development of talking head systems, the modeling of tongue has received little attention. One important reason is that tongue is an extremely complex organ; therefore it is rather complex to animate. However, tongue is important in realistic simulation of talking head systems. To model tongue realistically with acceptable computing resource, this paper presents a three dimensional muscle-controlled tongue model (MCTM). We adopt the approach from the surgery to classify tongue muscles into intrinsic and extrinsic ones. Two models are built for each kind of muscle. The intrinsic model is employed to control the deformation of tongue. And the extrinsic model is employed to control the holistic tongue movement. When the positions of these muscles are established, we can control tongue model to move side-to-side and change different shapes by using different muscle-shrinkages. The experiments show that this method can simulate typical tongue movements such as front raising and back raising.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Talking head system is an interactive virtual reality technique in human-computer interaction. During the development of talking head systems, the modeling of tongue has received little attention. One important reason is that tongue is an extremely complex organ; therefore it is rather complex to animate. However, tongue is important in realistic simulation of talking head systems. To model tongue realistically with acceptable computing resource, this paper presents a three dimensional muscle-controlled tongue model (MCTM). We adopt the approach from the surgery to classify tongue muscles into intrinsic and extrinsic ones. Two models are built for each kind of muscle. The intrinsic model is employed to control the deformation of tongue. And the extrinsic model is employed to control the holistic tongue movement. When the positions of these muscles are established, we can control tongue model to move side-to-side and change different shapes by using different muscle-shrinkages. The experiments show that this method can simulate typical tongue movements such as front raising and back raising.",
"fno": "06122581",
"keywords": [
"Computer Animation",
"Human Computer Interaction",
"Muscle",
"Virtual Reality",
"Interactive Virtual Reality Technique",
"Human Computer Interaction",
"Talking Head Systems",
"Computing Resource",
"Three Dimensional Muscle Controlled Tongue Model",
"Tongue Muscles",
"Tongue Deformation",
"Extrinsic Model",
"Intrinsic Model",
"Holistic Tongue Movement",
"Tongue",
"Muscles",
"Computational Modeling",
"Shape",
"Solid Modeling",
"Biological System Modeling",
"Speech",
"Talking Head",
"Virtual Face",
"Tongue Model",
"Muscle Model"
],
"authors": [
{
"affiliation": "Lab of Granular Computing, Zhangzhou Normal University, Zhangzhou 363000, China",
"fullName": "Zhixiang Chen",
"givenName": "Zhixiang",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lab of Granular Computing, Zhangzhou Normal University, Zhangzhou 363000, China",
"fullName": "Xinjing Zhang",
"givenName": "Xinjing",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lab of Granular Computing, Zhangzhou Normal University, Zhangzhou 363000, China",
"fullName": "Zhenrong Wu",
"givenName": "Zhenrong",
"surname": "Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "grc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-11-01T00:00:00",
"pubType": "proceedings",
"pages": "132-137",
"year": "2011",
"issn": null,
"isbn": "978-1-4577-0372-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06122580",
"articleId": "12OmNxUMHo4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06122582",
"articleId": "12OmNqN6R60",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2016/1611/0/07822762",
"title": "A fast and precise speech-triggered tongue animation system by combining parameterized model and anatomical model",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2016/07822762/12OmNC1Y5r1",
"parentPublication": {
"id": "proceedings/bibm/2016/1611/0",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2012/4357/0/06399752",
"title": "An image-based method for quantification of lateral pterygoid muscle deformation",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2012/06399752/12OmNqBKUdf",
"parentPublication": {
"id": "proceedings/bibe/2012/4357/0",
"title": "2012 IEEE 12th International Conference on Bioinformatics & Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2012/4357/0/06399749",
"title": "Image-based estimation of biomechanical relationship between masticatory muscle activities and mandibular movement",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2012/06399749/12OmNwCJORK",
"parentPublication": {
"id": "proceedings/bibe/2012/4357/0",
"title": "2012 IEEE 12th International Conference on Bioinformatics & Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2013/5159/0/06726380",
"title": "Tongue Rehabilitation Training Method of Hearing-Impaired Child Based on Visualization Model",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2013/06726380/12OmNwNwzIC",
"parentPublication": {
"id": "proceedings/msn/2013/5159/0",
"title": "2013 Ninth International Conference on Mobile Ad-hoc and Sensor Networks (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fcst/2010/7779/0/05575582",
"title": "Pronouncing Rehabilitation of Hearing-Impaired Children Based on Chinese 3D Visual-Speech Database",
"doi": null,
"abstractUrl": "/proceedings-article/fcst/2010/05575582/12OmNykkB6z",
"parentPublication": {
"id": "proceedings/fcst/2010/7779/0",
"title": "2010 Fifth International Conference on Frontier of Computer Science and Technology (FCST 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890595",
"title": "Modeling a realistic 3D physiological tongue for visual speech synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890595/12OmNyuy9UE",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019362",
"title": "From talking head to singing head: A significant enhancement for more natural human computer interaction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019362/12OmNzIUfQ1",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hbdss/2021/2188/0/218800a086",
"title": "Telemetry Data Analysis on sEMG Major Muscle Groups in Hanging-up-and-passing-through Horizontal Ladder of 400m Obstacles",
"doi": null,
"abstractUrl": "/proceedings-article/hbdss/2021/218800a086/1AqwRXXF1fi",
"parentPublication": {
"id": "proceedings/hbdss/2021/2188/0",
"title": "2021 International Conference on Health Big Data and Smart Sports (HBDSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0374",
"title": "Speech Driven Tongue Animation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0374/1H1lUSkaTeg",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2022/8487/0/848700a107",
"title": "Effects of Increased Arm Muscle Tone on Postural Recovery from External Forces: A simulation study",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2022/848700a107/1J6hFO9CDDi",
"parentPublication": {
"id": "proceedings/bibe/2022/8487/0",
"title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": null,
"article": {
"id": "12OmNrAv3CY",
"doi": "10.1109/ICCSEE.2012.11",
"title": "A Prior Knowledge-Based Algorithm for Tongue Body Segmentation",
"normalizedTitle": "A Prior Knowledge-Based Algorithm for Tongue Body Segmentation",
"abstract": "Because tongue image segmentation is an important procedure in the tongue characterization, its accuracy affects the following automatic process directly. In this paper, we propose a new tongue segmentation approach based on the combination a feature of tongue shape and the Snakes Correction model. We firstly get a rough tongue contour through using the features of tongue image in HIS color model, then correct a preliminary tongue contour with the feature of tongue shape, and apply this result to the Snake model for final result. The experimental results indicate that the approach is efficiently segmented tongue image.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Because tongue image segmentation is an important procedure in the tongue characterization, its accuracy affects the following automatic process directly. In this paper, we propose a new tongue segmentation approach based on the combination a feature of tongue shape and the Snakes Correction model. We firstly get a rough tongue contour through using the features of tongue image in HIS color model, then correct a preliminary tongue contour with the feature of tongue shape, and apply this result to the Snake model for final result. The experimental results indicate that the approach is efficiently segmented tongue image.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Because tongue image segmentation is an important procedure in the tongue characterization, its accuracy affects the following automatic process directly. In this paper, we propose a new tongue segmentation approach based on the combination a feature of tongue shape and the Snakes Correction model. We firstly get a rough tongue contour through using the features of tongue image in HIS color model, then correct a preliminary tongue contour with the feature of tongue shape, and apply this result to the Snake model for final result. The experimental results indicate that the approach is efficiently segmented tongue image.",
"fno": "4647b646",
"keywords": [
"Tongue Segmentation",
"Tongue Shape Feature",
"Snake Model",
"Shape Template"
],
"authors": [
{
"affiliation": null,
"fullName": "Chao Liang",
"givenName": "Chao",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dongcheng Shi",
"givenName": "Dongcheng",
"surname": "Shi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccsee",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-03-01T00:00:00",
"pubType": "proceedings",
"pages": "646-649",
"year": "2012",
"issn": null,
"isbn": "978-0-7695-4647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4647b639",
"articleId": "12OmNy5zsug",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4647b650",
"articleId": "12OmNzlUKQy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dasc/2009/3929/0/3929a413",
"title": "Automatic Segmentation in Tongue Image by Mouth Location and Active Appearance Model",
"doi": null,
"abstractUrl": "/proceedings-article/dasc/2009/3929a413/12OmNqHItCG",
"parentPublication": {
"id": "proceedings/dasc/2009/3929/0",
"title": "Dependable, Autonomic and Secure Computing, IEEE International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2017/3050/0/08217710",
"title": "Automatic tongue image matting for remote medical diagnosis",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217710/12OmNs4S8yH",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifita/2009/3600/2/3600b768",
"title": "Application of Image Segmentation Technique in Tongue Diagnosis",
"doi": null,
"abstractUrl": "/proceedings-article/ifita/2009/3600b768/12OmNwE9OOj",
"parentPublication": {
"id": "proceedings/ifita/2009/3600/2",
"title": "2009 International Forum on Information Technology and Applications (IFITA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410437",
"title": "Combination of polar edge detection and active contour model for automated tongue segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410437/12OmNxcdFWE",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2012/2746/0/06470316",
"title": "Features for automated tongue image shape classification",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2012/06470316/12OmNySosJ0",
"parentPublication": {
"id": "proceedings/bibmw/2012/2746/0",
"title": "2012 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2002/1695/1/169510616",
"title": "On Automated Tongue Image Segmentation in Chinese Medicine",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169510616/12OmNzWfp8f",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/1",
"title": "Proceedings of 16th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2017/3013/0/3013a752",
"title": "Tongue Image Segmentation via Color Decomposition and Thresholding",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2017/3013a752/12OmNzlD9fB",
"parentPublication": {
"id": "proceedings/icisce/2017/3013/0",
"title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669783",
"title": "Moment Invariants with Data Augmentation for Tongue Image Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669783/1A9W9etJZO8",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313262",
"title": "Deep Learning for Automatic Tracking of Tongue Surface in Real-time Ultrasound Videos, Landmarks instead of Contours",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313262/1qmfKK6Gvlu",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412845",
"title": "DE-Net: Dilated Encoder Network for Automated Tongue Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412845/1tmhP2ZNmP6",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBBhN9t",
"title": "Medical Imaging and Augmented Reality, International Workshop on",
"acronym": "miar",
"groupId": "1002236",
"volume": "0",
"displayVolume": "0",
"year": "2001",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrMZpoL",
"doi": "10.1109/MIAR.2001.930274",
"title": "Realistic Deformable Models for Simulating the Tongue during Laryngoscopy",
"normalizedTitle": "Realistic Deformable Models for Simulating the Tongue during Laryngoscopy",
"abstract": "Abstract: During the procedure of Laryngoscopy, an anaesthetist uses a rigid blade to displace and compress the tongue of the patient, and then inserts a tube into the larynx to allow controlled ventilation of the lungs during an operation. This procedure can sometimes be difficult and even life threatening, and there is therefore a need for regular training. Currently, plastic models are used for this purpose, and these have many disadvantages. Computer simulation is an attractive alternative, however, for proper realism it is necessary to build a model of the upper airway. In particular, we need a deformable model that can realistically simulate the behaviour of the tongue as it is compressed by the blade. We start from medical images, extract the details that characterise the subject, and then incorporate these in a finite element model to investigate how the tongue tissue behaves in response to the insertion of the blade, when it is subjected to a variety of loading conditions. The results show that, within a specific set of tongue material parameters, the simulated outcome can be successfully related to the experimental laryngoscopic studies. Further research is underway to apply these results in a virtual reality simulation for laryngoscopic training. One main problem to be solved is computing the deformations in real time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract: During the procedure of Laryngoscopy, an anaesthetist uses a rigid blade to displace and compress the tongue of the patient, and then inserts a tube into the larynx to allow controlled ventilation of the lungs during an operation. This procedure can sometimes be difficult and even life threatening, and there is therefore a need for regular training. Currently, plastic models are used for this purpose, and these have many disadvantages. Computer simulation is an attractive alternative, however, for proper realism it is necessary to build a model of the upper airway. In particular, we need a deformable model that can realistically simulate the behaviour of the tongue as it is compressed by the blade. We start from medical images, extract the details that characterise the subject, and then incorporate these in a finite element model to investigate how the tongue tissue behaves in response to the insertion of the blade, when it is subjected to a variety of loading conditions. The results show that, within a specific set of tongue material parameters, the simulated outcome can be successfully related to the experimental laryngoscopic studies. Further research is underway to apply these results in a virtual reality simulation for laryngoscopic training. One main problem to be solved is computing the deformations in real time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract: During the procedure of Laryngoscopy, an anaesthetist uses a rigid blade to displace and compress the tongue of the patient, and then inserts a tube into the larynx to allow controlled ventilation of the lungs during an operation. This procedure can sometimes be difficult and even life threatening, and there is therefore a need for regular training. Currently, plastic models are used for this purpose, and these have many disadvantages. Computer simulation is an attractive alternative, however, for proper realism it is necessary to build a model of the upper airway. In particular, we need a deformable model that can realistically simulate the behaviour of the tongue as it is compressed by the blade. We start from medical images, extract the details that characterise the subject, and then incorporate these in a finite element model to investigate how the tongue tissue behaves in response to the insertion of the blade, when it is subjected to a variety of loading conditions. The results show that, within a specific set of tongue material parameters, the simulated outcome can be successfully related to the experimental laryngoscopic studies. Further research is underway to apply these results in a virtual reality simulation for laryngoscopic training. One main problem to be solved is computing the deformations in real time.",
"fno": "11130125",
"keywords": [
"Deformable Models",
"Simulation",
"Tongue",
"Laryngoscopy"
],
"authors": [
{
"affiliation": "Federal University of Cear?",
"fullName": "M.A.F. Rodrigues",
"givenName": "M.A.F.",
"surname": "Rodrigues",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College",
"fullName": "D.F. Gillies",
"givenName": "D.F.",
"surname": "Gillies",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Hospital Aintree",
"fullName": "P. Charters",
"givenName": "P.",
"surname": "Charters",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "miar",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2001-06-01T00:00:00",
"pubType": "proceedings",
"pages": "0125",
"year": "2001",
"issn": null,
"isbn": "0-7695-1113-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "11130117",
"articleId": "12OmNANkoiC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "11130131",
"articleId": "12OmNAKcNLm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvmowOB",
"doi": "10.1109/VR.2017.7892238",
"title": "Acoustic VR in the mouth: A real-time speech-driven visual tongue system",
"normalizedTitle": "Acoustic VR in the mouth: A real-time speech-driven visual tongue system",
"abstract": "We propose an acoustic-VR system that converts acoustic signals of human language (Chinese) to realistic 3D tongue animation sequences in real time. It is known that directly capturing the 3D geometry of the tongue at a frame rate that matches the tongue's swift movement during the language production is challenging. This difficulty is handled by utilizing the electromagnetic articulography (EMA) sensor as the intermediate medium linking the acoustic data to the simulated virtual reality. We leverage Deep Neural Networks to train a model that maps the input acoustic signals to the positional information of pre-defined EMA sensors based on 1,108 utterances. Afterwards, we develop a novel reduced physics-based dynamics model for simulating the tongue's motion. Unlike the existing methods, our deformable model is nonlinear, volume-preserving, and accommodates collision between the tongue and the oral cavity (mostly with the jaw). The tongue's deformation could be highly localized which imposes extra difficulties for existing spectral model reduction methods. Alternatively, we adopt a spatial reduction method that allows an expressive subspace representation of the tongue's deformation. We systematically evaluate the simulated tongue shapes with real-world shapes acquired by MRI/CT. Our experiment demonstrates that the proposed system is able to deliver a realistic visual tongue animation corresponding to a user's speech signal.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose an acoustic-VR system that converts acoustic signals of human language (Chinese) to realistic 3D tongue animation sequences in real time. It is known that directly capturing the 3D geometry of the tongue at a frame rate that matches the tongue's swift movement during the language production is challenging. This difficulty is handled by utilizing the electromagnetic articulography (EMA) sensor as the intermediate medium linking the acoustic data to the simulated virtual reality. We leverage Deep Neural Networks to train a model that maps the input acoustic signals to the positional information of pre-defined EMA sensors based on 1,108 utterances. Afterwards, we develop a novel reduced physics-based dynamics model for simulating the tongue's motion. Unlike the existing methods, our deformable model is nonlinear, volume-preserving, and accommodates collision between the tongue and the oral cavity (mostly with the jaw). The tongue's deformation could be highly localized which imposes extra difficulties for existing spectral model reduction methods. Alternatively, we adopt a spatial reduction method that allows an expressive subspace representation of the tongue's deformation. We systematically evaluate the simulated tongue shapes with real-world shapes acquired by MRI/CT. Our experiment demonstrates that the proposed system is able to deliver a realistic visual tongue animation corresponding to a user's speech signal.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose an acoustic-VR system that converts acoustic signals of human language (Chinese) to realistic 3D tongue animation sequences in real time. It is known that directly capturing the 3D geometry of the tongue at a frame rate that matches the tongue's swift movement during the language production is challenging. This difficulty is handled by utilizing the electromagnetic articulography (EMA) sensor as the intermediate medium linking the acoustic data to the simulated virtual reality. We leverage Deep Neural Networks to train a model that maps the input acoustic signals to the positional information of pre-defined EMA sensors based on 1,108 utterances. Afterwards, we develop a novel reduced physics-based dynamics model for simulating the tongue's motion. Unlike the existing methods, our deformable model is nonlinear, volume-preserving, and accommodates collision between the tongue and the oral cavity (mostly with the jaw). The tongue's deformation could be highly localized which imposes extra difficulties for existing spectral model reduction methods. Alternatively, we adopt a spatial reduction method that allows an expressive subspace representation of the tongue's deformation. We systematically evaluate the simulated tongue shapes with real-world shapes acquired by MRI/CT. Our experiment demonstrates that the proposed system is able to deliver a realistic visual tongue animation corresponding to a user's speech signal.",
"fno": "07892238",
"keywords": [
"Tongue",
"Speech",
"Magnetic Resonance Imaging",
"Hidden Markov Models",
"Solid Modeling",
"Real Time Systems",
"Three Dimensional Displays",
"H 5 1 Information Systems Multimedia Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Department of Electrical and Computer Engineering, The University of New Mexico, USA",
"fullName": "Ran Luo",
"givenName": "Ran",
"surname": "Luo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Linguistics, Chinese Academy of Social Sciences, China",
"fullName": "Qiang Fang",
"givenName": "Qiang",
"surname": "Fang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Software, Tianjin University, China",
"fullName": "Jianguo Wei",
"givenName": "Jianguo",
"surname": "Wei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Software, Tianjin University, China",
"fullName": "Wenhuan Lu",
"givenName": "Wenhuan",
"surname": "Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State Key Laboratory of CAD & CG, Zhejiang University, China",
"fullName": "Weiwei Xu",
"givenName": "Weiwei",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Electrical and Computer Engineering, The University of New Mexico, USA",
"fullName": "Yin Yang",
"givenName": "Yin",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "112-121",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892237",
"articleId": "12OmNzh5z4G",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892239",
"articleId": "12OmNxWcHbV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2004/8484/1/01326078",
"title": "Speech synthesis from real time ultrasound images of the tongue",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326078/12OmNBSSVna",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/1",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2016/1611/0/07822762",
"title": "A fast and precise speech-triggered tongue animation system by combining parameterized model and anatomical model",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2016/07822762/12OmNC1Y5r1",
"parentPublication": {
"id": "proceedings/bibm/2016/1611/0",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc/2009/3929/0/3929a413",
"title": "Automatic Segmentation in Tongue Image by Mouth Location and Active Appearance Model",
"doi": null,
"abstractUrl": "/proceedings-article/dasc/2009/3929a413/12OmNqHItCG",
"parentPublication": {
"id": "proceedings/dasc/2009/3929/0",
"title": "Dependable, Autonomic and Secure Computing, IEEE International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2016/1269/0/07760013",
"title": "Assistive technology for physically challenged or paralyzed person using voluntary tongue movement",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2016/07760013/12OmNwErpTm",
"parentPublication": {
"id": "proceedings/iciev/2016/1269/0",
"title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/caapwd/1992/2730/0/00217393",
"title": "An animated display of tongue, lip and jaw movements during speech: A proper basis for speech aids to the handicapped and other speech technologies",
"doi": null,
"abstractUrl": "/proceedings-article/caapwd/1992/00217393/12OmNyLiur9",
"parentPublication": {
"id": "proceedings/caapwd/1992/2730/0",
"title": "Proceedings of the Johns Hopkins National Search for Computing Applications to Assist Persons with Disabilities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1994/6240/0/00324008",
"title": "Modeling and animating the human tongue during speech production",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1994/00324008/12OmNzcPAMk",
"parentPublication": {
"id": "proceedings/ca/1994/6240/0",
"title": "Proceedings of Computer Animation '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2015/10/mco2015100054",
"title": "Toward Silent-Speech Control of Consumer Wearables",
"doi": null,
"abstractUrl": "/magazine/co/2015/10/mco2015100054/13rRUILLkqO",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050811",
"title": "Physics-Based Deformable Tongue Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050811/13rRUNvgziC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0374",
"title": "Speech Driven Tongue Animation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0374/1H1lUSkaTeg",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09953323",
"title": "Tongue-Jaw Movement Recognition Through Acoustic Sensing on Smartphones",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09953323/1IlJCEDjYC4",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx8wTfL",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx0RIKz",
"doi": "10.1109/ICPR.2008.4761651",
"title": "Tongue line extraction",
"normalizedTitle": "Tongue line extraction",
"abstract": "Tongue line refers to the surface of the tongue covered with fissures or lines in deep or shallow shape and is one type of important features in clinical practice of Traditional Chinese Tongue Diagnosis (TCTD). However, it is hard to extract tongue lines completely due to the large variation of the widths of tongue lines and the strong noise caused by the rough surface of tongue and uneven illumination. In this paper, an improved wide line detector (WLD) is presented for tongue line extraction. Based on the characteristics of tongue lines, the original WLD is improved to avoid the undesired separation of a wide line and the influence of uneven lighting conditions. The proposed method has been tested on a total of 286 tongue line images and our experimental results demonstrate that the improved WLD significantly outperforms the original WLD for tongue line extraction by improving the TPR 16.5%, FPR 44.6% and PM 33.4%, respectively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Tongue line refers to the surface of the tongue covered with fissures or lines in deep or shallow shape and is one type of important features in clinical practice of Traditional Chinese Tongue Diagnosis (TCTD). However, it is hard to extract tongue lines completely due to the large variation of the widths of tongue lines and the strong noise caused by the rough surface of tongue and uneven illumination. In this paper, an improved wide line detector (WLD) is presented for tongue line extraction. Based on the characteristics of tongue lines, the original WLD is improved to avoid the undesired separation of a wide line and the influence of uneven lighting conditions. The proposed method has been tested on a total of 286 tongue line images and our experimental results demonstrate that the improved WLD significantly outperforms the original WLD for tongue line extraction by improving the TPR 16.5%, FPR 44.6% and PM 33.4%, respectively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Tongue line refers to the surface of the tongue covered with fissures or lines in deep or shallow shape and is one type of important features in clinical practice of Traditional Chinese Tongue Diagnosis (TCTD). However, it is hard to extract tongue lines completely due to the large variation of the widths of tongue lines and the strong noise caused by the rough surface of tongue and uneven illumination. In this paper, an improved wide line detector (WLD) is presented for tongue line extraction. Based on the characteristics of tongue lines, the original WLD is improved to avoid the undesired separation of a wide line and the influence of uneven lighting conditions. The proposed method has been tested on a total of 286 tongue line images and our experimental results demonstrate that the improved WLD significantly outperforms the original WLD for tongue line extraction by improving the TPR 16.5%, FPR 44.6% and PM 33.4%, respectively.",
"fno": "04761651",
"keywords": [
"Feature Extraction",
"Medical Image Processing",
"Tongue Line Extraction",
"Traditional Chinese Tongue Diagnosis",
"Wide Line Detector",
"Uneven Lighting Conditions",
"Tongue",
"Shape",
"Detectors",
"Rough Surfaces",
"Surface Roughness",
"Kernel",
"Nonlinear Filters",
"Biometrics",
"Computer Science",
"Noise Shaping"
],
"authors": [
{
"affiliation": "Biometrics Research Centre, Department of Computing, The Hong Kong Polytechnic University, Hong Kong",
"fullName": "Laura Li Liu",
"givenName": "Laura Li",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Biometrics Research Centre, Department of Computing, The Hong Kong Polytechnic University, Hong Kong",
"fullName": "David Zhang",
"givenName": "David",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Biometrics Research Centre, Department of Computing, The Hong Kong Polytechnic University, Hong Kong",
"fullName": "Ajay Kumar",
"givenName": "Ajay",
"surname": "Kumar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Science and Technology, Harbin Institute of Technology, 150001 China",
"fullName": "Xiangqian Wu",
"givenName": "Xiangqian",
"surname": "Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1051-4651",
"isbn": "978-1-4244-2174-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04761650",
"articleId": "12OmNApcubT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04761652",
"articleId": "12OmNxHJ9uy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2016/1611/0/07822719",
"title": "Tongue fur detection on the smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2016/07822719/12OmNAlNizD",
"parentPublication": {
"id": "proceedings/bibm/2016/1611/0",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2014/5669/0/06999331",
"title": "The study of abnormal red tongue tip chromatic measurements and analysis",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2014/06999331/12OmNClQ0tB",
"parentPublication": {
"id": "proceedings/bibm/2014/5669/0",
"title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsee/2012/4647/2/4647b646",
"title": "A Prior Knowledge-Based Algorithm for Tongue Body Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647b646/12OmNrAv3CY",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2017/0563/0/08273593",
"title": "Stress measurement from tongue color imaging",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2017/08273593/12OmNrNh0KC",
"parentPublication": {
"id": "proceedings/acii/2017/0563/0",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2014/5669/0/06999328",
"title": "Cracked tongue recognition using statistic feature",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2014/06999328/12OmNwcUk05",
"parentPublication": {
"id": "proceedings/bibm/2014/5669/0",
"title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csnt/2012/4692/0/4692a203",
"title": "Tongue Image Extraction Technique from Face and Its Application in Public Use System (Banking)",
"doi": null,
"abstractUrl": "/proceedings-article/csnt/2012/4692a203/12OmNwkzusC",
"parentPublication": {
"id": "proceedings/csnt/2012/4692/0",
"title": "Communication Systems and Network Technologies, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2012/2746/0/06470316",
"title": "Features for automated tongue image shape classification",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2012/06470316/12OmNySosJ0",
"parentPublication": {
"id": "proceedings/bibmw/2012/2746/0",
"title": "2012 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050811",
"title": "Physics-Based Deformable Tongue Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050811/13rRUNvgziC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/taai/2018/1229/0/122900a014",
"title": "Tongue Fissure Visualization with Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/taai/2018/122900a014/17D45X2fUEP",
"parentPublication": {
"id": "proceedings/taai/2018/1229/0",
"title": "2018 Conference on Technologies and Applications of Artificial Intelligence (TAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2020/9986/0/998600a109",
"title": "A Tongue Segmentation Algorithm Based on LBP Feature and Cascade Classifier",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2020/998600a109/1tweTCypXEc",
"parentPublication": {
"id": "proceedings/aiam/2020/9986/0",
"title": "2020 2nd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy3iFum",
"title": "2012 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"acronym": "bibmw",
"groupId": "1001585",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNySosJ0",
"doi": "10.1109/BIBMW.2012.6470316",
"title": "Features for automated tongue image shape classification",
"normalizedTitle": "Features for automated tongue image shape classification",
"abstract": "Inspection of the tongue is a key component in Traditional Chinese Medicine. Chinese medical practitioners diagnose the health status of a patient based on observation of the color, shape, and texture characteristics of the tongue. The condition of the tongue can objectively reflect the presence of certain diseases and aid in the differentiation of syndromes, prognosis of disease and establishment of treatment methods. Tongue shape is a very important feature in tongue diagnosis. A different tongue shape other than ellipse could indicate presence of certain pathologies. In this paper, we propose a novel set of features, based on shape geometry and polynomial equations, for automated recognition and classification of the shape of a tongue image using supervised machine learning techniques. We also present a novel method to correct the orientation/deflection of the tongue based on the symmetry of axis detection method. Experimental results obtained on a set of 303 tongue images demonstrate that the proposed method improves the current state of the art method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Inspection of the tongue is a key component in Traditional Chinese Medicine. Chinese medical practitioners diagnose the health status of a patient based on observation of the color, shape, and texture characteristics of the tongue. The condition of the tongue can objectively reflect the presence of certain diseases and aid in the differentiation of syndromes, prognosis of disease and establishment of treatment methods. Tongue shape is a very important feature in tongue diagnosis. A different tongue shape other than ellipse could indicate presence of certain pathologies. In this paper, we propose a novel set of features, based on shape geometry and polynomial equations, for automated recognition and classification of the shape of a tongue image using supervised machine learning techniques. We also present a novel method to correct the orientation/deflection of the tongue based on the symmetry of axis detection method. Experimental results obtained on a set of 303 tongue images demonstrate that the proposed method improves the current state of the art method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Inspection of the tongue is a key component in Traditional Chinese Medicine. Chinese medical practitioners diagnose the health status of a patient based on observation of the color, shape, and texture characteristics of the tongue. The condition of the tongue can objectively reflect the presence of certain diseases and aid in the differentiation of syndromes, prognosis of disease and establishment of treatment methods. Tongue shape is a very important feature in tongue diagnosis. A different tongue shape other than ellipse could indicate presence of certain pathologies. In this paper, we propose a novel set of features, based on shape geometry and polynomial equations, for automated recognition and classification of the shape of a tongue image using supervised machine learning techniques. We also present a novel method to correct the orientation/deflection of the tongue based on the symmetry of axis detection method. Experimental results obtained on a set of 303 tongue images demonstrate that the proposed method improves the current state of the art method.",
"fno": "06470316",
"keywords": [
"Machine Learning",
"Tongue Shape Classification",
"Medical Biometrics",
"Geometric Feature"
],
"authors": [
{
"affiliation": "Computer Science Department and Informatics Institute University of Missouri, Columbia, MO, USA",
"fullName": "Tayo Obafemi-Ajayi",
"givenName": "Tayo",
"surname": "Obafemi-Ajayi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science Department and Informatics Institute University of Missouri, Columbia, MO, USA",
"fullName": "Ratchadaporn Kanawong",
"givenName": "Ratchadaporn",
"surname": "Kanawong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science Department and Informatics Institute University of Missouri, Columbia, MO, USA",
"fullName": "Dong Xu",
"givenName": "Dong",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science Department and Informatics Institute University of Missouri, Columbia, MO, USA",
"fullName": "Ye Duan",
"givenName": "Ye",
"surname": "Duan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibmw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-10-01T00:00:00",
"pubType": "proceedings",
"pages": "273-279",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-2746-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06470315",
"articleId": "12OmNxFaLmT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06470317",
"articleId": "12OmNvkpldr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2013/1309/0/06732649",
"title": "Classification of tongue coating using Gabor and Tamura features on unbalanced data set",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2013/06732649/12OmNBCHMHZ",
"parentPublication": {
"id": "proceedings/bibm/2013/1309/0",
"title": "2013 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2014/5669/0/06999331",
"title": "The study of abnormal red tongue tip chromatic measurements and analysis",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2014/06999331/12OmNClQ0tB",
"parentPublication": {
"id": "proceedings/bibm/2014/5669/0",
"title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsee/2012/4647/2/4647b646",
"title": "A Prior Knowledge-Based Algorithm for Tongue Body Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647b646/12OmNrAv3CY",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifita/2009/3600/2/3600b768",
"title": "Application of Image Segmentation Technique in Tongue Diagnosis",
"doi": null,
"abstractUrl": "/proceedings-article/ifita/2009/3600b768/12OmNwE9OOj",
"parentPublication": {
"id": "proceedings/ifita/2009/3600/2",
"title": "2009 International Forum on Information Technology and Applications (IFITA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761651",
"title": "Tongue line extraction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761651/12OmNx0RIKz",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410437",
"title": "Combination of polar edge detection and active contour model for automated tongue segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410437/12OmNxcdFWE",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2013/1309/0/06732705",
"title": "A new tongue diagnosis application on Android platform",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2013/06732705/12OmNz2kqgk",
"parentPublication": {
"id": "proceedings/bibm/2013/1309/0",
"title": "2013 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2014/5669/0/06999330",
"title": "Automatic tongue image segmentation based on histogram projection and matting",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2014/06999330/12OmNzYNNiv",
"parentPublication": {
"id": "proceedings/bibm/2014/5669/0",
"title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/taai/2018/1229/0/122900a014",
"title": "Tongue Fissure Visualization with Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/taai/2018/122900a014/17D45X2fUEP",
"parentPublication": {
"id": "proceedings/taai/2018/1229/0",
"title": "2018 Conference on Technologies and Applications of Artificial Intelligence (TAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412845",
"title": "DE-Net: Dilated Encoder Network for Automated Tongue Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412845/1tmhP2ZNmP6",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNB8Cj92",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyuy9UE",
"doi": "10.1109/ICMEW.2014.6890595",
"title": "Modeling a realistic 3D physiological tongue for visual speech synthesis",
"normalizedTitle": "Modeling a realistic 3D physiological tongue for visual speech synthesis",
"abstract": "We built a 3D anatomically and biomechanically accurate physiological tongue model for use in visual speech synthesis. For the anatomical modeling part, the tongue and its muscles are constructed based on accurate medical data. Due to their complexity, muscles geometry and fiber arrangement are specified by a proposed interactive muscle marking method. For the biomechanical modeling part, a nonlinear, quasi-incompressible, hyperelastic constitutive model is applied for describing the tongue tissues. Particularly, tongue muscles are additionally endowed with an anisotropic constitutive model, which reflects the active and passive mechanical behavior of muscle fibers. The dynamic deformation of tongue is simulated based on finite element method (FEM). Simulation results of tongue movements subjected to certain muscle activations are presented and validated with experimental data. This tongue model can be applied in many areas, like media art, education, entertainment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We built a 3D anatomically and biomechanically accurate physiological tongue model for use in visual speech synthesis. For the anatomical modeling part, the tongue and its muscles are constructed based on accurate medical data. Due to their complexity, muscles geometry and fiber arrangement are specified by a proposed interactive muscle marking method. For the biomechanical modeling part, a nonlinear, quasi-incompressible, hyperelastic constitutive model is applied for describing the tongue tissues. Particularly, tongue muscles are additionally endowed with an anisotropic constitutive model, which reflects the active and passive mechanical behavior of muscle fibers. The dynamic deformation of tongue is simulated based on finite element method (FEM). Simulation results of tongue movements subjected to certain muscle activations are presented and validated with experimental data. This tongue model can be applied in many areas, like media art, education, entertainment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We built a 3D anatomically and biomechanically accurate physiological tongue model for use in visual speech synthesis. For the anatomical modeling part, the tongue and its muscles are constructed based on accurate medical data. Due to their complexity, muscles geometry and fiber arrangement are specified by a proposed interactive muscle marking method. For the biomechanical modeling part, a nonlinear, quasi-incompressible, hyperelastic constitutive model is applied for describing the tongue tissues. Particularly, tongue muscles are additionally endowed with an anisotropic constitutive model, which reflects the active and passive mechanical behavior of muscle fibers. The dynamic deformation of tongue is simulated based on finite element method (FEM). Simulation results of tongue movements subjected to certain muscle activations are presented and validated with experimental data. This tongue model can be applied in many areas, like media art, education, entertainment.",
"fno": "06890595",
"keywords": [
"Muscles",
"Tongue",
"Biological System Modeling",
"Deformable Models",
"Biomechanics",
"Biological Tissues",
"Force",
"Finite Element Model",
"Computer Graphics",
"Visual Speech Synthesis",
"Tongue Model"
],
"authors": [
{
"affiliation": "Department of Automation, University of Science and Technology of China, China",
"fullName": "Chen Jiang",
"givenName": null,
"surname": "Chen Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Automation, University of Science and Technology of China, China",
"fullName": "Changwei Luo",
"givenName": null,
"surname": "Changwei Luo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Automation, University of Science and Technology of China, China",
"fullName": "Jun Yu",
"givenName": null,
"surname": "Jun Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Automation, University of Science and Technology of China, China",
"fullName": "Rui Li",
"givenName": null,
"surname": "Rui Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Automation, University of Science and Technology of China, China",
"fullName": "Zengfu Wang",
"givenName": null,
"surname": "Zengfu Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2014",
"issn": "1945-7871",
"isbn": "978-1-4799-4717-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06890594",
"articleId": "12OmNwdtwkt",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06890596",
"articleId": "12OmNy6qfGS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/grc/2011/0372/0/06122581",
"title": "A new tongue model based on muscle-control",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2011/06122581/12OmNA0MZ1l",
"parentPublication": {
"id": "proceedings/grc/2011/0372/0",
"title": "2011 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/1/01326078",
"title": "Speech synthesis from real time ultrasound images of the tongue",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326078/12OmNBSSVna",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/1",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2016/1611/0/07822762",
"title": "A fast and precise speech-triggered tongue animation system by combining parameterized model and anatomical model",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2016/07822762/12OmNC1Y5r1",
"parentPublication": {
"id": "proceedings/bibm/2016/1611/0",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfcse/2011/1562/0/06041672",
"title": "Research Advancements of Compression Equipments in Sports Science",
"doi": null,
"abstractUrl": "/proceedings-article/icfcse/2011/06041672/12OmNqC2uTp",
"parentPublication": {
"id": "proceedings/icfcse/2011/1562/0",
"title": "2011 International Conference on Future Computer Science and Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/miar/2001/1113/0/11130125",
"title": "Realistic Deformable Models for Simulating the Tongue during Laryngoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/miar/2001/11130125/12OmNrMZpoL",
"parentPublication": {
"id": "proceedings/miar/2001/1113/0",
"title": "Medical Imaging and Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2001/1330/0/13300192",
"title": "Realistic Skeletal Muscle Deformation Using Finite Element Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2001/13300192/12OmNs0TKLH",
"parentPublication": {
"id": "proceedings/sibgrapi/2001/1330/0",
"title": "Proceedings XIV Brazilian Symposium on Computer Graphics and Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2012/4357/0/06399749",
"title": "Image-based estimation of biomechanical relationship between masticatory muscle activities and mandibular movement",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2012/06399749/12OmNwCJORK",
"parentPublication": {
"id": "proceedings/bibe/2012/4357/0",
"title": "2012 IEEE 12th International Conference on Bioinformatics & Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1994/6240/0/00324008",
"title": "Modeling and animating the human tongue during speech production",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1994/00324008/12OmNzcPAMk",
"parentPublication": {
"id": "proceedings/ca/1994/6240/0",
"title": "Proceedings of Computer Animation '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669783",
"title": "Moment Invariants with Data Augmentation for Tongue Image Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669783/1A9W9etJZO8",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0374",
"title": "Speech Driven Tongue Animation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0374/1H1lUSkaTeg",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCvLY1R",
"title": "Proceedings of Computer Animation '94",
"acronym": "ca",
"groupId": "1000121",
"volume": "0",
"displayVolume": "0",
"year": "1994",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzcPAMk",
"doi": "10.1109/CA.1994.324008",
"title": "Modeling and animating the human tongue during speech production",
"normalizedTitle": "Modeling and animating the human tongue during speech production",
"abstract": "A geometric and kinematic model for describing the global shape and the predominant motions of the human tongue, to be applied in computer animation, is discussed. The model consists of a spatial configuration of moving points that form the vertices of a mesh of 9 3-D triangles. These triangles are interpreted as charge centres (the so-called skeleton) for a potential field, and the surface of the tongue is modelled as an equi-potential surface of this field. In turn, this surface is approximated by a triangular mesh prior to rendering. As to the motion of the skeleton, precautions are taken in order to achieve (approximate) volume conservation; the computation of the triangular mesh describing the surface of the tongue implements penetration avoidance with respect to the palate. Further, the motions of the skeleton derive from a formal speech model which also controls the motion of the lips to arrive at a visually plausible speech synchronous mouth model.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "A geometric and kinematic model for describing the global shape and the predominant motions of the human tongue, to be applied in computer animation, is discussed. The model consists of a spatial configuration of moving points that form the vertices of a mesh of 9 3-D triangles. These triangles are interpreted as charge centres (the so-called skeleton) for a potential field, and the surface of the tongue is modelled as an equi-potential surface of this field. In turn, this surface is approximated by a triangular mesh prior to rendering. As to the motion of the skeleton, precautions are taken in order to achieve (approximate) volume conservation; the computation of the triangular mesh describing the surface of the tongue implements penetration avoidance with respect to the palate. Further, the motions of the skeleton derive from a formal speech model which also controls the motion of the lips to arrive at a visually plausible speech synchronous mouth model.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A geometric and kinematic model for describing the global shape and the predominant motions of the human tongue, to be applied in computer animation, is discussed. The model consists of a spatial configuration of moving points that form the vertices of a mesh of 9 3-D triangles. These triangles are interpreted as charge centres (the so-called skeleton) for a potential field, and the surface of the tongue is modelled as an equi-potential surface of this field. In turn, this surface is approximated by a triangular mesh prior to rendering. As to the motion of the skeleton, precautions are taken in order to achieve (approximate) volume conservation; the computation of the triangular mesh describing the surface of the tongue implements penetration avoidance with respect to the palate. Further, the motions of the skeleton derive from a formal speech model which also controls the motion of the lips to arrive at a visually plausible speech synchronous mouth model.",
"fno": "00324008",
"keywords": [
"Computer Animation",
"Rendering Computer Graphics",
"Computational Geometry",
"Speech Synthesis",
"Human Tongue",
"Speech Production",
"Kinematic Model",
"Geometric Model",
"Global Shape",
"Computer Animation",
"3 D Triangles",
"Charge Centres",
"Equi Potential Surface",
"Triangular Mesh",
"Rendering",
"Formal Speech Model",
"Lips",
"Speech Synchronous Mouth Model",
"Animation",
"Humans",
"Tongue",
"Speech",
"Skeleton",
"Kinematics",
"Shape",
"Motion Control",
"Lips",
"Mouth"
],
"authors": [
{
"affiliation": "Dept. of Comput. & Inf. Sci., Pennsylvania Univ., Philadelphia, PA, USA",
"fullName": "C. Pelachaud",
"givenName": "C.",
"surname": "Pelachaud",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "C.W.A.M. van Overveld",
"givenName": "C.W.A.M.",
"surname": "van Overveld",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "C. Seah",
"givenName": "C.",
"surname": "Seah",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ca",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1994-01-01T00:00:00",
"pubType": "proceedings",
"pages": "40,41,42,43,44,45,46,47,48,49",
"year": "1994",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00324007",
"articleId": "12OmNwK7o9I",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00324009",
"articleId": "12OmNvoWUYx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2009/4442/0/05457529",
"title": "The use of tongue protrusion gestures for video-based communication",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457529/12OmNAZOJY9",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a306",
"title": "Lattice-Based Skinning and Deformation for Real-Time Skeleton-Driven Animation",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a306/12OmNBEGYGs",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2016/1611/0/07822762",
"title": "A fast and precise speech-triggered tongue animation system by combining parameterized model and anatomical model",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2016/07822762/12OmNC1Y5r1",
"parentPublication": {
"id": "proceedings/bibm/2016/1611/0",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc/2009/3929/0/3929a413",
"title": "Automatic Segmentation in Tongue Image by Mouth Location and Active Appearance Model",
"doi": null,
"abstractUrl": "/proceedings-article/dasc/2009/3929a413/12OmNqHItCG",
"parentPublication": {
"id": "proceedings/dasc/2009/3929/0",
"title": "Dependable, Autonomic and Secure Computing, IEEE International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/miar/2001/1113/0/11130125",
"title": "Realistic Deformable Models for Simulating the Tongue during Laryngoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/miar/2001/11130125/12OmNrMZpoL",
"parentPublication": {
"id": "proceedings/miar/2001/1113/0",
"title": "Medical Imaging and Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761651",
"title": "Tongue line extraction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761651/12OmNx0RIKz",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/1997/8028/0/80280117",
"title": "\"Skeleton climbing\": fast isosurfaces with fewer triangles",
"doi": null,
"abstractUrl": "/proceedings-article/pg/1997/80280117/12OmNxFaLCm",
"parentPublication": {
"id": "proceedings/pg/1997/8028/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890595",
"title": "Modeling a realistic 3D physiological tongue for visual speech synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890595/12OmNyuy9UE",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2017/3013/0/3013a752",
"title": "Tongue Image Segmentation via Color Decomposition and Thresholding",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2017/3013a752/12OmNzlD9fB",
"parentPublication": {
"id": "proceedings/icisce/2017/3013/0",
"title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0374",
"title": "Speech Driven Tongue Animation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0374/1H1lUSkaTeg",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKir6",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WIXbQ2",
"doi": "10.1109/ICPR.2018.8545185",
"title": "Reducing Tongue Shape Dimensionality from Hundreds of Available Resources Using Autoencoder",
"normalizedTitle": "Reducing Tongue Shape Dimensionality from Hundreds of Available Resources Using Autoencoder",
"abstract": "In spite of various observation tools, tongue shapes are still scarce resource in reality. Autoencoder, a kind of deep neural networks (DNN), performs well on data reduction and pattern discovery. However, since autoencoder usually needs large scale data in training, challenges exist for traditional autoencoder to obtain tongues' motion patterns only from tens or hundreds of available tongue shapes. To overcome this problem, we propose a two-steps autoencoder, where we first construct a stacked denoising autoencoder (dAE) to learn the essential presentation of the tongue shapes from their possible deformations; then an additional autoencoder with small number of hidden units is added upon the previous stacked autoencoder, and used for dimensionality reduction. Experiments run on 240 vowels' tongue shapes obtained from Chinese speakers' pronunciation X-ray films, and the proposed model is compared with traditional dAE and the classical principal component analysis (PCA) on dimensionality reduction and reconstruction in details. Results validate the performance of the proposed tongue model.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In spite of various observation tools, tongue shapes are still scarce resource in reality. Autoencoder, a kind of deep neural networks (DNN), performs well on data reduction and pattern discovery. However, since autoencoder usually needs large scale data in training, challenges exist for traditional autoencoder to obtain tongues' motion patterns only from tens or hundreds of available tongue shapes. To overcome this problem, we propose a two-steps autoencoder, where we first construct a stacked denoising autoencoder (dAE) to learn the essential presentation of the tongue shapes from their possible deformations; then an additional autoencoder with small number of hidden units is added upon the previous stacked autoencoder, and used for dimensionality reduction. Experiments run on 240 vowels' tongue shapes obtained from Chinese speakers' pronunciation X-ray films, and the proposed model is compared with traditional dAE and the classical principal component analysis (PCA) on dimensionality reduction and reconstruction in details. Results validate the performance of the proposed tongue model.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In spite of various observation tools, tongue shapes are still scarce resource in reality. Autoencoder, a kind of deep neural networks (DNN), performs well on data reduction and pattern discovery. However, since autoencoder usually needs large scale data in training, challenges exist for traditional autoencoder to obtain tongues' motion patterns only from tens or hundreds of available tongue shapes. To overcome this problem, we propose a two-steps autoencoder, where we first construct a stacked denoising autoencoder (dAE) to learn the essential presentation of the tongue shapes from their possible deformations; then an additional autoencoder with small number of hidden units is added upon the previous stacked autoencoder, and used for dimensionality reduction. Experiments run on 240 vowels' tongue shapes obtained from Chinese speakers' pronunciation X-ray films, and the proposed model is compared with traditional dAE and the classical principal component analysis (PCA) on dimensionality reduction and reconstruction in details. Results validate the performance of the proposed tongue model.",
"fno": "08545185",
"keywords": [
"Tongue",
"Shape",
"Strain",
"Dimensionality Reduction",
"Training",
"Image Reconstruction",
"Noise Reduction",
"Vocal Tract",
"Tongue Shape",
"PCA",
"Neural Network"
],
"authors": [
{
"affiliation": "National Laboratory of Pattern Recognition (NLPR), Institute of Automation, Chinese Academy of Sciences, 100190, China",
"fullName": "Minghao Yang",
"givenName": "Minghao",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Laboratory of Pattern Recognition (NLPR), Institute of Automation, Chinese Academy of Sciences, 100190, China",
"fullName": "Dawei Zhang",
"givenName": "Dawei",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Laboratory of Pattern Recognition (NLPR), Institute of Automation, Chinese Academy of Sciences, 100190, China",
"fullName": "Jianhua Tao",
"givenName": "Jianhua",
"surname": "Tao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-08-01T00:00:00",
"pubType": "proceedings",
"pages": "2875-2880",
"year": "2018",
"issn": "1051-4651",
"isbn": "978-1-5386-3788-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08545257",
"articleId": "17D45WODaqk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08545687",
"articleId": "17D45VVho4Z",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/grc/2011/0372/0/06122581",
"title": "A new tongue model based on muscle-control",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2011/06122581/12OmNA0MZ1l",
"parentPublication": {
"id": "proceedings/grc/2011/0372/0",
"title": "2011 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ams/2008/3136/0/3136a213",
"title": "Performance Comparison of Three Types of Autoencoder Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ams/2008/3136a213/12OmNBO3JVx",
"parentPublication": {
"id": "proceedings/ams/2008/3136/0",
"title": "Asia International Conference on Modelling & Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892238",
"title": "Acoustic VR in the mouth: A real-time speech-driven visual tongue system",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892238/12OmNvmowOB",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050811",
"title": "Physics-Based Deformable Tongue Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050811/13rRUNvgziC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2021/0403/0/09705000",
"title": "Structure-Preserving Deep Autoencoder-based Dimensionality Reduction for Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2021/09705000/1AUpdHxXpVS",
"parentPublication": {
"id": "proceedings/snpd/2021/0403/0",
"title": "2021 IEEE/ACIS 22nd International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2021/0668/0/066800a687",
"title": "Examing and Evaluating Dimension Reduction Algorithms for Classifying Alzheimer’s Diseases using Gene Expression Data",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2021/066800a687/1CxzOX5024g",
"parentPublication": {
"id": "proceedings/msn/2021/0668/0",
"title": "2021 17th International Conference on Mobility, Sensing and Networking (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09953323",
"title": "Tongue-Jaw Movement Recognition Through Acoustic Sensing on Smartphones",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09953323/1IlJCEDjYC4",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2019/5686/0/568600a577",
"title": "Autoencoder Based Dimensionality Reduction of Feature Vectors for Object Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2019/568600a577/1j9xB188lAk",
"parentPublication": {
"id": "proceedings/sitis/2019/5686/0",
"title": "2019 15th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci*cc/2019/1419/0/09146039",
"title": "Dual Denoising Autoencoder Feature Learning for Cancer Diagnosis",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2019/09146039/1lFJdVaIcNi",
"parentPublication": {
"id": "proceedings/icci*cc/2019/1419/0",
"title": "2019 IEEE 18th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412865",
"title": "Dimensionality Reduction for Data Visualization and Linear Classification, and the Trade-off between Robustness and Classification Accuracy",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412865/1tmhQxVISaI",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKirO",
"title": "2018 Conference on Technologies and Applications of Artificial Intelligence (TAAI)",
"acronym": "taai",
"groupId": "1800268",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45X2fUEP",
"doi": "10.1109/TAAI.2018.00013",
"title": "Tongue Fissure Visualization with Deep Learning",
"normalizedTitle": "Tongue Fissure Visualization with Deep Learning",
"abstract": "Tongue diagnosis is a unique practice in traditional Chinese medicine(TCM), which can be used to infer the health condition of a person. However, different TCM doctors may give different interpretations on the same tongue. If an artificial intelligence model can be developed based on a large number of doctor-interpreted tongue images, a more objective judgment will be obtained. Deep learning in artificial intelligence has excellent performance in image recognition, and feature extraction can be done automatically by deep learning without image processing experts. This study attempts to develop a deep learning model through a large number of tongue images, especially for tongue fissures. We also visualize the fissure regions with Gradient-weighted Class Activation Mapping(Grad-cam). Therefore, the model not only try to detect tongue fissures but also localize tongue fissure regions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Tongue diagnosis is a unique practice in traditional Chinese medicine(TCM), which can be used to infer the health condition of a person. However, different TCM doctors may give different interpretations on the same tongue. If an artificial intelligence model can be developed based on a large number of doctor-interpreted tongue images, a more objective judgment will be obtained. Deep learning in artificial intelligence has excellent performance in image recognition, and feature extraction can be done automatically by deep learning without image processing experts. This study attempts to develop a deep learning model through a large number of tongue images, especially for tongue fissures. We also visualize the fissure regions with Gradient-weighted Class Activation Mapping(Grad-cam). Therefore, the model not only try to detect tongue fissures but also localize tongue fissure regions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Tongue diagnosis is a unique practice in traditional Chinese medicine(TCM), which can be used to infer the health condition of a person. However, different TCM doctors may give different interpretations on the same tongue. If an artificial intelligence model can be developed based on a large number of doctor-interpreted tongue images, a more objective judgment will be obtained. Deep learning in artificial intelligence has excellent performance in image recognition, and feature extraction can be done automatically by deep learning without image processing experts. This study attempts to develop a deep learning model through a large number of tongue images, especially for tongue fissures. We also visualize the fissure regions with Gradient-weighted Class Activation Mapping(Grad-cam). Therefore, the model not only try to detect tongue fissures but also localize tongue fissure regions.",
"fno": "122900a014",
"keywords": [
"Data Visualisation",
"Feature Extraction",
"Learning Artificial Intelligence",
"Medical Image Processing",
"Object Detection",
"Object Recognition",
"Patient Diagnosis",
"Deep Learning Model",
"Tongue Fissure Visualization",
"Tongue Diagnosis",
"Health Condition",
"Artificial Intelligence Model",
"Doctor Interpreted Tongue Images",
"Image Recognition",
"Feature Extraction",
"Tongue Fissure Region Localization",
"TCM Doctors",
"Grad Cam",
"Gradient Weighted Class Activation Mapping",
"TCM Doctors",
"Traditional Chinese Medicine",
"Tongue",
"Feature Extraction",
"Medical Services",
"Medical Diagnostic Imaging",
"Chinese Medicine Tongue Diagnosis Artificial Intelligence Deep Learning Class Activation Mapping"
],
"authors": [
{
"affiliation": null,
"fullName": "Wen-Hsien Chang",
"givenName": "Wen-Hsien",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hsueh-Ting Chu",
"givenName": "Hsueh-Ting",
"surname": "Chu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hen-Hong Chang",
"givenName": "Hen-Hong",
"surname": "Chang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "taai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-11-01T00:00:00",
"pubType": "proceedings",
"pages": "14-17",
"year": "2018",
"issn": null,
"isbn": "978-1-7281-1229-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "122900a010",
"articleId": "17D45WrVgcg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "122900a018",
"articleId": "17D45WK5ArU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2014/5669/0/06999331",
"title": "The study of abnormal red tongue tip chromatic measurements and analysis",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2014/06999331/12OmNClQ0tB",
"parentPublication": {
"id": "proceedings/bibm/2014/5669/0",
"title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2010/8303/0/05703898",
"title": "Study about the feature of some parts of the Traditional Chinese Medicine(TCM) physical examination correlate with the indexes of the renal function",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2010/05703898/12OmNqFJhP4",
"parentPublication": {
"id": "proceedings/bibmw/2010/8303/0",
"title": "2010 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2014/5669/0/06999328",
"title": "Cracked tongue recognition using statistic feature",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2014/06999328/12OmNwcUk05",
"parentPublication": {
"id": "proceedings/bibm/2014/5669/0",
"title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761651",
"title": "Tongue line extraction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761651/12OmNx0RIKz",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410437",
"title": "Combination of polar edge detection and active contour model for automated tongue segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410437/12OmNxcdFWE",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2013/1309/0/06732705",
"title": "A new tongue diagnosis application on Android platform",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2013/06732705/12OmNz2kqgk",
"parentPublication": {
"id": "proceedings/bibm/2013/1309/0",
"title": "2013 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2010/8303/0/05703871",
"title": "TCM health management system",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2010/05703871/12OmNzdoMYK",
"parentPublication": {
"id": "proceedings/bibmw/2010/8303/0",
"title": "2010 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09994964",
"title": "Chinese Medicine Tongue Recognition under Data Scarcity Based on the Xingbaohui Platform",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09994964/1JC1QuFZj9u",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccia/2020/6042/0/09178692",
"title": "Classifying Tongue Images using Deep Transfer Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iccia/2020/09178692/1mDu1Wfppio",
"parentPublication": {
"id": "proceedings/iccia/2020/6042/0",
"title": "2020 5th International Conference on Computational Intelligence and Applications (ICCIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313321",
"title": "Machine Learning Opportunities for Automatic Tongue Diagnosis Systems",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313321/1qmfSVhBXGw",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyKa6fk",
"title": "Proceedings of 1994 IEEE Workshop on Motion of Non-rigid and Articulated Objects",
"acronym": "mnrao",
"groupId": "1001997",
"volume": "0",
"displayVolume": "0",
"year": "1994",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBtl1sY",
"doi": "10.1109/MNRAO.1994.346252",
"title": "Lower limb kinematics of human walking with the medial axis transformation",
"normalizedTitle": "Lower limb kinematics of human walking with the medial axis transformation",
"abstract": "The paper describes a simple model for free-speed human walking and compares ordinary images of a walking person to the model. Three dimensional kinematic data were obtained from subjects walking with markers over the joints of their limbs. The average of these data was used to derive a model stick figure of the lower limbs, based on the average anthropometric data of the population. Stick figures were obtained from ordinary images of persons dressed in tight fitting clothes without any markers by using the medial axis transformation. The two dimensional information from the image stick figures was compared with the projection of the three dimensional information of the model onto the relevant plane. A high degree of correlation was noted between the rotational patterns of the model and image stick figures.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The paper describes a simple model for free-speed human walking and compares ordinary images of a walking person to the model. Three dimensional kinematic data were obtained from subjects walking with markers over the joints of their limbs. The average of these data was used to derive a model stick figure of the lower limbs, based on the average anthropometric data of the population. Stick figures were obtained from ordinary images of persons dressed in tight fitting clothes without any markers by using the medial axis transformation. The two dimensional information from the image stick figures was compared with the projection of the three dimensional information of the model onto the relevant plane. A high degree of correlation was noted between the rotational patterns of the model and image stick figures.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The paper describes a simple model for free-speed human walking and compares ordinary images of a walking person to the model. Three dimensional kinematic data were obtained from subjects walking with markers over the joints of their limbs. The average of these data was used to derive a model stick figure of the lower limbs, based on the average anthropometric data of the population. Stick figures were obtained from ordinary images of persons dressed in tight fitting clothes without any markers by using the medial axis transformation. The two dimensional information from the image stick figures was compared with the projection of the three dimensional information of the model onto the relevant plane. A high degree of correlation was noted between the rotational patterns of the model and image stick figures.",
"fno": "00346252",
"keywords": [
"Kinematics",
"Biomechanics",
"Solid Modelling",
"Motion Estimation",
"Lower Limb Kinematics",
"Human Walking",
"Medial Axis Transformation",
"Free Speed Human Walking",
"Walking Person",
"Three Dimensional Kinematic Data",
"Model Stick Figure",
"Average Anthropometric Data",
"Two Dimensional Information",
"Three Dimensional Information",
"Rotational Patterns",
"Image Stick Figures",
"Kinematics",
"Humans",
"Legged Locomotion",
"Biological System Modeling",
"Kinetic Theory",
"Frequency",
"Thigh",
"Leg",
"Foot",
"Motion Analysis"
],
"authors": [
{
"affiliation": "Comput. & Vision Res. Center, Texas Univ., Austin, TX, USA",
"fullName": "A.G. Bharatkumar",
"givenName": "A.G.",
"surname": "Bharatkumar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "K.E. Daigle",
"givenName": "K.E.",
"surname": "Daigle",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "M.G. Pandy",
"givenName": "M.G.",
"surname": "Pandy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qin Cai",
"givenName": null,
"surname": "Qin Cai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "J.K. Aggarwal",
"givenName": "J.K.",
"surname": "Aggarwal",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mnrao",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1994-01-01T00:00:00",
"pubType": "proceedings",
"pages": "70,71,72,73,74,75,76",
"year": "1994",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00346251",
"articleId": "12OmNCgrD22",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00346253",
"articleId": "12OmNBqdrhf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icndc/2010/8382/0/05645365",
"title": "Force/Torque-based Compliance Control for Humanoid Robot to Compensate the Landing Impact Force",
"doi": null,
"abstractUrl": "/proceedings-article/icndc/2010/05645365/12OmNAoDibc",
"parentPublication": {
"id": "proceedings/icndc/2010/8382/0",
"title": "2010 First International Conference on Networking and Distributed Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cca/2000/6562/0/00897393",
"title": "Constant torque walking",
"doi": null,
"abstractUrl": "/proceedings-article/cca/2000/00897393/12OmNAs2trp",
"parentPublication": {
"id": "proceedings/cca/2000/6562/0",
"title": "Proceedings of the 2000 IEEE International Conference on Control Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223431",
"title": "Walking recording and experience system by Visual Psychophysics Lab",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223431/12OmNB1NVNQ",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00220320",
"title": "A walking prescription for statically-stable walkers based on walker/terrain interaction",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220320/12OmNvA1hhp",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciicii/2017/2434/0/2434a164",
"title": "A Study of Exoskeleton Walking Aids Device",
"doi": null,
"abstractUrl": "/proceedings-article/iciicii/2017/2434a164/12OmNxdVgTa",
"parentPublication": {
"id": "proceedings/iciicii/2017/2434/0",
"title": "2017 International Conference on Industrial Informatics - Computing Technology, Intelligent Technology, Industrial Information Integration (ICIICII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00132014",
"title": "Laser rangefinder calibration for a walking robot",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00132014/12OmNy9Prfx",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/med/2006/1/0/04124877",
"title": "Observer-based control for absolute orientation estimation of a five-link walking biped robot",
"doi": null,
"abstractUrl": "/proceedings-article/med/2006/04124877/12OmNz5JBNP",
"parentPublication": {
"id": "proceedings/med/2006/1/0",
"title": "Proceedings of the 14th Mediterranean Conference on Control and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09881908",
"title": "PropelWalker: A Leg-Based Wearable System With Propeller-Based Force Feedback for Walking in Fluids in VR",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09881908/1Gv909WpCG4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798263",
"title": "EEG Can Be Used to Measure Embodiment When Controlling a Walking Self-Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798263/1cJ1gj5NtQc",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090453",
"title": "Perception of Walking Self-body Avatar Enhances Virtual-walking Sensation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090453/1jIxoojmMy4",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqG0SVt",
"title": "2009 Mexican International Conference on Computer Science",
"acronym": "enc",
"groupId": "1000139",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBziB91",
"doi": "10.1109/ENC.2009.39",
"title": "Query an Image Database by Segmentation and Content",
"normalizedTitle": "Query an Image Database by Segmentation and Content",
"abstract": "The Recent advances on image databases have been developed and most of them consider several methods to query image, the amount of information stored is so big that it is a must to use a combination of different techniques such as image segmentation in order to reduce the dimensionality of the search space. Taking advantage of an image pictographic expressiveness together with the soundness of image segmentation methods, it is possible to rely on an efficient method to query an image database. In this work, it is proposed a new method of image segmentation, indexation and retrieval by content. In this paper an image is not considered as a set of objects, is considered as a feature vector where its components represent a segment of color. Color is treated in another color space rather than to work on RGB space. For each image a fuzzy histogram is obtained in order to get for each image its own signature together whit its own feature vector. Fuzzy theory is applied to solve color uncertainty, which it comes from color quantification and human perception of colors. The whole set of images, which are in RGB representation are transformed to LAB model, obtaining better color representation in order to obtain a feature vector together with wavelet coefficients.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Recent advances on image databases have been developed and most of them consider several methods to query image, the amount of information stored is so big that it is a must to use a combination of different techniques such as image segmentation in order to reduce the dimensionality of the search space. Taking advantage of an image pictographic expressiveness together with the soundness of image segmentation methods, it is possible to rely on an efficient method to query an image database. In this work, it is proposed a new method of image segmentation, indexation and retrieval by content. In this paper an image is not considered as a set of objects, is considered as a feature vector where its components represent a segment of color. Color is treated in another color space rather than to work on RGB space. For each image a fuzzy histogram is obtained in order to get for each image its own signature together whit its own feature vector. Fuzzy theory is applied to solve color uncertainty, which it comes from color quantification and human perception of colors. The whole set of images, which are in RGB representation are transformed to LAB model, obtaining better color representation in order to obtain a feature vector together with wavelet coefficients.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Recent advances on image databases have been developed and most of them consider several methods to query image, the amount of information stored is so big that it is a must to use a combination of different techniques such as image segmentation in order to reduce the dimensionality of the search space. Taking advantage of an image pictographic expressiveness together with the soundness of image segmentation methods, it is possible to rely on an efficient method to query an image database. In this work, it is proposed a new method of image segmentation, indexation and retrieval by content. In this paper an image is not considered as a set of objects, is considered as a feature vector where its components represent a segment of color. Color is treated in another color space rather than to work on RGB space. For each image a fuzzy histogram is obtained in order to get for each image its own signature together whit its own feature vector. Fuzzy theory is applied to solve color uncertainty, which it comes from color quantification and human perception of colors. The whole set of images, which are in RGB representation are transformed to LAB model, obtaining better color representation in order to obtain a feature vector together with wavelet coefficients.",
"fno": "3882a127",
"keywords": [
"Fuzzy Set Theory",
"Image Colour Analysis",
"Image Segmentation",
"Visual Databases",
"Wavelet Transforms",
"Image Database",
"Image Segmentation",
"Pictographic Expressiveness",
"Fuzzy Histogram",
"RGB Space",
"Fuzzy Theory",
"Color Quantification",
"RGB Representation",
"LAB Model",
"Image Databases",
"Image Segmentation",
"Color",
"Image Retrieval",
"Content Based Retrieval",
"Histograms",
"Information Retrieval",
"Humans",
"Space Technology",
"Data Mining"
],
"authors": [
{
"affiliation": "Fac. de Cienc. de la Comput., Univ. Autonoma de Puebla (BUAP), Puebla, Mexico",
"fullName": "Enrique Castillo Juárez",
"givenName": "Enrique",
"surname": "Castillo Juárez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fac. de Cienc. de la Comput., Univ. Autonoma de Puebla (BUAP), Puebla, Mexico",
"fullName": "Ivo H. Pineda Torres",
"givenName": "Ivo H.",
"surname": "Pineda Torres",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fac. de Cienc. de la Comput., Univ. Autonoma de Puebla (BUAP), Puebla, Mexico",
"fullName": "Maria J. Somodevilla",
"givenName": "Maria J.",
"surname": "Somodevilla",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fac. de Cienc. de la Comput., Univ. Autonoma de Puebla (BUAP), Puebla, Mexico",
"fullName": "Manuel Martín Ortíz",
"givenName": "Manuel",
"surname": "Martín Ortíz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "enc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-09-01T00:00:00",
"pubType": "proceedings",
"pages": "127-134",
"year": "2009",
"issn": null,
"isbn": "978-1-4244-5258-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3882a119",
"articleId": "12OmNCdBDKt",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3882a137",
"articleId": "12OmNCwCLri",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2009/4442/0/05457612",
"title": "Fast image segmentation and texture feature extraction for image retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457612/12OmNAoDidz",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284698",
"title": "Texture Moment for Content-Based Image Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284698/12OmNvAiSGZ",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2002/1727/0/17270914",
"title": "An Effective Content-Based Visual Image Retrieval System",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2002/17270914/12OmNwBT1sp",
"parentPublication": {
"id": "proceedings/compsac/2002/1727/0",
"title": "Proceedings 26th Annual International Computer Software and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118219",
"title": "Color segmentation by hierarchical connected components analysis with image enhancement by symmetric neighborhood filters",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118219/12OmNwlqhR3",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmpcon/1994/5380/0/00282889",
"title": "Ultimedia Manager: Query By Image Content and its applications",
"doi": null,
"abstractUrl": "/proceedings-article/cmpcon/1994/00282889/12OmNxVlTH4",
"parentPublication": {
"id": "proceedings/cmpcon/1994/5380/0",
"title": "Proceedings of COMPCON '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ias/2009/3744/2/3744b381",
"title": "An Improving Technique of Color Histogram in Segmentation-based Image Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/ias/2009/3744b381/12OmNxwENp8",
"parentPublication": {
"id": "proceedings/ias/2009/3744/2",
"title": "Information Assurance and Security, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmcs/1997/7819/0/00609755",
"title": "Color clustering techniques for color-content-based image retrieval from image databases",
"doi": null,
"abstractUrl": "/proceedings-article/mmcs/1997/00609755/12OmNzl3WQA",
"parentPublication": {
"id": "proceedings/mmcs/1997/7819/0",
"title": "Proceedings of IEEE International Conference on Multimedia Computing and Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/2/02530705",
"title": "Efficient Content-Based Image Retrieval Based on Color Homogeneous Objects Segmentation and their Spatial Relationship Characterization",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02530705/12OmNzlUKyQ",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/2",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gscit/2016/2659/0/2659a054",
"title": "Gradual Integration of Local Color information for Image Retrieval by Content: Application to Cell-CCV Method",
"doi": null,
"abstractUrl": "/proceedings-article/gscit/2016/2659a054/12OmNznCl08",
"parentPublication": {
"id": "proceedings/gscit/2016/2659/0",
"title": "2016 Global Summit on Computer & Information Technology (GSCIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6265/1/00576246",
"title": "Indexing for complex queries on a query-by-content image database",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576246/12OmNzsJ7tA",
"parentPublication": {
"id": "proceedings/icpr/1994/6265/1",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBNM8Mg",
"title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"acronym": "sitis",
"groupId": "1002425",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrJAdRd",
"doi": "10.1109/SITIS.2015.91",
"title": "Fast Face Detection Based on Skin Segmentation and Facial Features",
"normalizedTitle": "Fast Face Detection Based on Skin Segmentation and Facial Features",
"abstract": "Human face detection plays an important role in various biometric applications such as crowd surveillance, photography, human computer interaction, tracking, automatic target recognition and many security related areas. Varying illumination conditions, color variance, brightness, pose variations are major challenging problems for face detection. This paper proposes a localized approach for face detection based on skin color segmentation and facial features. Skin color segmentation approach decreases the computational complexity and increases the accuracy since the skin region is previously determined. For skin color segmentation, we have used Y CbCr color image. The advantage of using Y CbCr is to remove the illumination component that is represented by Y. This method is tested on two databases: Bao database: contains 157 images and Muct database: contains 751 images. The algorithm achieves an average accuracy of 96:73%. Comparison with Viola Jones and Face Detection using Skin Color Model methods has also been done.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Human face detection plays an important role in various biometric applications such as crowd surveillance, photography, human computer interaction, tracking, automatic target recognition and many security related areas. Varying illumination conditions, color variance, brightness, pose variations are major challenging problems for face detection. This paper proposes a localized approach for face detection based on skin color segmentation and facial features. Skin color segmentation approach decreases the computational complexity and increases the accuracy since the skin region is previously determined. For skin color segmentation, we have used Y CbCr color image. The advantage of using Y CbCr is to remove the illumination component that is represented by Y. This method is tested on two databases: Bao database: contains 157 images and Muct database: contains 751 images. The algorithm achieves an average accuracy of 96:73%. Comparison with Viola Jones and Face Detection using Skin Color Model methods has also been done.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Human face detection plays an important role in various biometric applications such as crowd surveillance, photography, human computer interaction, tracking, automatic target recognition and many security related areas. Varying illumination conditions, color variance, brightness, pose variations are major challenging problems for face detection. This paper proposes a localized approach for face detection based on skin color segmentation and facial features. Skin color segmentation approach decreases the computational complexity and increases the accuracy since the skin region is previously determined. For skin color segmentation, we have used Y CbCr color image. The advantage of using Y CbCr is to remove the illumination component that is represented by Y. This method is tested on two databases: Bao database: contains 157 images and Muct database: contains 751 images. The algorithm achieves an average accuracy of 96:73%. Comparison with Viola Jones and Face Detection using Skin Color Model methods has also been done.",
"fno": "9721a663",
"keywords": [
"Skin",
"Image Color Analysis",
"Face",
"Face Detection",
"Image Segmentation",
"Yttrium",
"Lightning",
"Eccentricity",
"Image Segmentation",
"Face Detection",
"Color Model",
"Median Filter",
"Euler Number"
],
"authors": [
{
"affiliation": null,
"fullName": "Shalini Yadav",
"givenName": "Shalini",
"surname": "Yadav",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Neeta Nain",
"givenName": "Neeta",
"surname": "Nain",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sitis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-11-01T00:00:00",
"pubType": "proceedings",
"pages": "663-668",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-9721-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "9721a655",
"articleId": "12OmNzVGcGR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "9721a669",
"articleId": "12OmNwD1q0K",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wi-iat/2012/4880/3/4880c373",
"title": "Skin Segmentation Based on Human Face Illumination Feature",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2012/4880c373/12OmNAR1b0B",
"parentPublication": {
"id": "proceedings/wi-iat/2012/4880/1",
"title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2014/5179/0/06850755",
"title": "Face detection using skin color modeling and geometric feature",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2014/06850755/12OmNBh8gTs",
"parentPublication": {
"id": "proceedings/iciev/2014/5179/0",
"title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2014/5179/0/06850763",
"title": "Facial features detection in color images based on skin color segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2014/06850763/12OmNsd6vjP",
"parentPublication": {
"id": "proceedings/iciev/2014/5179/0",
"title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdcloud/2015/7183/0/7183a256",
"title": "Face Detection under Particular Environment Based on Skin Color Model and Radial Basis Function Network",
"doi": null,
"abstractUrl": "/proceedings-article/bdcloud/2015/7183a256/12OmNvEhg0k",
"parentPublication": {
"id": "proceedings/bdcloud/2015/7183/0",
"title": "2015 IEEE Fifth International Conference on Big Data and Cloud Computing (BDCloud)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iita/2008/3497/1/3497a457",
"title": "Face Tracking in Video Sequences Using Particle Filter Based on Skin Color Model and Facial Contour",
"doi": null,
"abstractUrl": "/proceedings-article/iita/2008/3497a457/12OmNx9WT1h",
"parentPublication": {
"id": "proceedings/iita/2008/3497/3",
"title": "2008 Second International Symposium on Intelligent Information Technology Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cicsyn/2011/4482/0/4482a219",
"title": "Face Detection Based on Fuzzy Granulation and Skin Color Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cicsyn/2011/4482a219/12OmNy49sN3",
"parentPublication": {
"id": "proceedings/cicsyn/2011/4482/0",
"title": "Computational Intelligence, Communication Systems and Networks, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icetet/2008/3267/0/3267a095",
"title": "Face Detection and Localization of Facial Features in Still and Video Images",
"doi": null,
"abstractUrl": "/proceedings-article/icetet/2008/3267a095/12OmNypIYFg",
"parentPublication": {
"id": "proceedings/icetet/2008/3267/0",
"title": "Emerging Trends in Engineering & Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2016/9919/0/07459203",
"title": "Incorporating skin color for improved face detection and tracking system",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2016/07459203/12OmNzXWZJq",
"parentPublication": {
"id": "proceedings/ssiai/2016/9919/0",
"title": "2016 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmwa/2009/3646/0/3646a070",
"title": "Study on Face Detection Algorithm Based on Skin Color Segmentation and AdaBoost Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/wmwa/2009/3646a070/12OmNzlD95K",
"parentPublication": {
"id": "proceedings/wmwa/2009/3646/0",
"title": "Web Mining and Web-based Application, Pacific-Asia Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fcst/2010/7779/0/05577329",
"title": "Skin-Anatomy Based Face Texture Image Synthesis by Skin Feature Distribution Analyzing Method",
"doi": null,
"abstractUrl": "/proceedings-article/fcst/2010/05577329/12OmNzzxuta",
"parentPublication": {
"id": "proceedings/fcst/2010/7779/0",
"title": "2010 Fifth International Conference on Frontier of Computer Science and Technology (FCST 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxA3Z4B",
"title": "2009 XXII Brazilian Symposium on Computer Graphics and Image Processing",
"acronym": "sibgrapi",
"groupId": "1000131",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrkT7FX",
"doi": "10.1109/SIBGRAPI.2009.47",
"title": "A Study of the Effect of Illumination Conditions and Color Spaces on Skin Segmentation",
"normalizedTitle": "A Study of the Effect of Illumination Conditions and Color Spaces on Skin Segmentation",
"abstract": "This work aims at investigating the influence of luminance information and environment illumination on skin classification. We explore Bayesian approaches to perform automatic classification of human skin pixels on digital images, using color features as input. Two probabilistic skin color models were built on different color spaces (RGB, normalized RG, HSI, HS, YCbCr and CbCr) and tested in a task of automatic pixel classification into skin and non-skin. Analyses of classification performance were done by presenting an illumination controlled image database containing images acquired in four different illumination conditions (shadow, sun, incandescent and fluorescent lights) to these classifiers. Our experiments show that building probabilistic skin color models using the CbCr color space generally improves performance of the classifiers and that best performance is achieved in shadow illumination.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This work aims at investigating the influence of luminance information and environment illumination on skin classification. We explore Bayesian approaches to perform automatic classification of human skin pixels on digital images, using color features as input. Two probabilistic skin color models were built on different color spaces (RGB, normalized RG, HSI, HS, YCbCr and CbCr) and tested in a task of automatic pixel classification into skin and non-skin. Analyses of classification performance were done by presenting an illumination controlled image database containing images acquired in four different illumination conditions (shadow, sun, incandescent and fluorescent lights) to these classifiers. Our experiments show that building probabilistic skin color models using the CbCr color space generally improves performance of the classifiers and that best performance is achieved in shadow illumination.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This work aims at investigating the influence of luminance information and environment illumination on skin classification. We explore Bayesian approaches to perform automatic classification of human skin pixels on digital images, using color features as input. Two probabilistic skin color models were built on different color spaces (RGB, normalized RG, HSI, HS, YCbCr and CbCr) and tested in a task of automatic pixel classification into skin and non-skin. Analyses of classification performance were done by presenting an illumination controlled image database containing images acquired in four different illumination conditions (shadow, sun, incandescent and fluorescent lights) to these classifiers. Our experiments show that building probabilistic skin color models using the CbCr color space generally improves performance of the classifiers and that best performance is achieved in shadow illumination.",
"fno": "3813a245",
"keywords": [
"Skin Segmentation",
"Bayes Theory",
"Image Processing",
"Color Spaces"
],
"authors": [
{
"affiliation": null,
"fullName": "Diogo Kuiaski",
"givenName": "Diogo",
"surname": "Kuiaski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hugo Vieira Neto",
"givenName": "Hugo Vieira",
"surname": "Neto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gustavo Borba",
"givenName": "Gustavo",
"surname": "Borba",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Humberto Gamba",
"givenName": "Humberto",
"surname": "Gamba",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sibgrapi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-10-01T00:00:00",
"pubType": "proceedings",
"pages": "245-252",
"year": "2009",
"issn": "1530-1834",
"isbn": "978-0-7695-3813-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3813a238",
"articleId": "12OmNxdDFOT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3813a253",
"articleId": "12OmNyeECH0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icfcc/2009/3591/0/3591a324",
"title": "Some Notes on Accuracy Constraints of Pixel Based Skin Color Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icfcc/2009/3591a324/12OmNBNM8QV",
"parentPublication": {
"id": "proceedings/icfcc/2009/3591/0",
"title": "2009 International Conference on Future Computer and Communication (ICFCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031b954",
"title": "Skin Color Segmentation Based on Improved 2D Otsu and YCgCr",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031b954/12OmNCbU2SR",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2011/2135/0/06120998",
"title": "A Power Law Transformation Predicting Lightness Conditions Based on Skin Color Space Detection",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2011/06120998/12OmNCga1SI",
"parentPublication": {
"id": "proceedings/trustcom/2011/2135/0",
"title": "2011IEEE 10th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgec/2011/4449/0/4449a252",
"title": "Eye Detection Based on Skin Color Analysis with Different Poses under Varying Illumination Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icgec/2011/4449a252/12OmNqGitQw",
"parentPublication": {
"id": "proceedings/icgec/2011/4449/0",
"title": "Genetic and Evolutionary Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services-2/2008/3313/0/3313a171",
"title": "Skin Detection on Images with Color Deviation",
"doi": null,
"abstractUrl": "/proceedings-article/services-2/2008/3313a171/12OmNwl8GFv",
"parentPublication": {
"id": "proceedings/services-2/2008/3313/0",
"title": "Services Part II, IEEE Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011868",
"title": "Skin detection with illumination adaptation in single image",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011868/12OmNy50g21",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icciis/2010/4260/0/4260a266",
"title": "Research on a Skin Color Detection Algorithm Based on Self-adaptive Skin Color Model",
"doi": null,
"abstractUrl": "/proceedings-article/icciis/2010/4260a266/12OmNzBOij0",
"parentPublication": {
"id": "proceedings/icciis/2010/4260/0",
"title": "Communications and Intelligence Information Security, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2008/3278/0/3278a577",
"title": "Hand Detections Based on Invariant Skin-Color Models Constructed Using Linear and Nonlinear Color Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2008/3278a577/12OmNzXWZIS",
"parentPublication": {
"id": "proceedings/iih-msp/2008/3278/0",
"title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2004/07/i0862",
"title": "Skin Color-Based Video Segmentation under Time-Varying Illumination",
"doi": null,
"abstractUrl": "/journal/tp/2004/07/i0862/13rRUygT7aa",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150626",
"title": "Illumination-based Transformations Improve Skin Lesion Segmentation in Dermoscopic Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150626/1lPHtWATm2A",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAsTgXc",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyPQ4GV",
"doi": "10.1109/ICCVW.2011.6130379",
"title": "RGBD-HuDaAct: A color-depth video database for human daily activity recognition",
"normalizedTitle": "RGBD-HuDaAct: A color-depth video database for human daily activity recognition",
"abstract": "In this paper, we present a home-monitoring oriented human activity recognition benchmark database, based on the combination of a color video camera and a depth sensor. Our contributions are two-fold: 1) We have created a publicly releasable human activity video database (i.e., named as RGBD-HuDaAct), which contains synchronized color-depth video streams, for the task of human daily activity recognition. This database aims at encouraging more research efforts on human activity recognition based on multi-modality sensor combination (e.g., color plus depth). 2) Two multi-modality fusion schemes, which naturally combine color and depth information, have been developed from two state-of-the-art feature representation methods for action recognition, i.e., spatio-temporal interest points (STIPs) and motion history images (MHIs). These depth-extended feature representation methods are evaluated comprehensively and superior recognition performances over their uni-modality (e.g., color only) counterparts are demonstrated.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present a home-monitoring oriented human activity recognition benchmark database, based on the combination of a color video camera and a depth sensor. Our contributions are two-fold: 1) We have created a publicly releasable human activity video database (i.e., named as RGBD-HuDaAct), which contains synchronized color-depth video streams, for the task of human daily activity recognition. This database aims at encouraging more research efforts on human activity recognition based on multi-modality sensor combination (e.g., color plus depth). 2) Two multi-modality fusion schemes, which naturally combine color and depth information, have been developed from two state-of-the-art feature representation methods for action recognition, i.e., spatio-temporal interest points (STIPs) and motion history images (MHIs). These depth-extended feature representation methods are evaluated comprehensively and superior recognition performances over their uni-modality (e.g., color only) counterparts are demonstrated.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present a home-monitoring oriented human activity recognition benchmark database, based on the combination of a color video camera and a depth sensor. Our contributions are two-fold: 1) We have created a publicly releasable human activity video database (i.e., named as RGBD-HuDaAct), which contains synchronized color-depth video streams, for the task of human daily activity recognition. This database aims at encouraging more research efforts on human activity recognition based on multi-modality sensor combination (e.g., color plus depth). 2) Two multi-modality fusion schemes, which naturally combine color and depth information, have been developed from two state-of-the-art feature representation methods for action recognition, i.e., spatio-temporal interest points (STIPs) and motion history images (MHIs). These depth-extended feature representation methods are evaluated comprehensively and superior recognition performances over their uni-modality (e.g., color only) counterparts are demonstrated.",
"fno": "06130379",
"keywords": [
"Feature Extraction",
"Image Colour Analysis",
"Image Fusion",
"Image Recognition",
"Image Representation",
"Video Cameras",
"Video Databases",
"Color Depth Video Database",
"Human Daily Activity Recognition",
"Home Monitoring",
"Color Video Camera",
"Depth Sensor",
"RGBD Hu Da Act Database",
"Multimodality Sensor Combination",
"Multimodality Fusion Scheme",
"Feature Representation Method",
"Spatio Temporal Interest Point",
"Motion History Image",
"Red Green Blue",
"Databases",
"Humans",
"History",
"Image Color Analysis",
"Cameras",
"Histograms",
"Visualization"
],
"authors": [
{
"affiliation": "Advanced Digital Sciences Center, Singapore 138632",
"fullName": "Bingbing Ni",
"givenName": "Bingbing",
"surname": "Ni",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Advanced Digital Sciences Center, Singapore 138632",
"fullName": "Gang Wang",
"givenName": null,
"surname": "Gang Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UIUC, IL 61820-5711 USA",
"fullName": "Pierre Moulin",
"givenName": "Pierre",
"surname": "Moulin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1147-1153",
"year": "2011",
"issn": null,
"isbn": "978-1-4673-0063-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06130378",
"articleId": "12OmNyQ7Gao",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06130380",
"articleId": "12OmNsd6vky",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/5209d493",
"title": "Clustered Multi-task Linear Discriminant Analysis for View Invariant Color-Depth Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209d493/12OmNqIzh9O",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2017/3050/0/08217765",
"title": "Stroke patient daily activity observation system",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217765/12OmNrIrPpF",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460246",
"title": "Automatic segmentation fusing color and depth",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460246/12OmNvTBB00",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460989",
"title": "Image matting with color and depth information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460989/12OmNvlg8lg",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a910",
"title": "Depth Camera Based on Color-Coded Aperture",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a910/12OmNvm6VHm",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601b377",
"title": "Depth Map Completion by Jointly Exploiting Blurry Color Images and Sparse Depth Maps",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b377/12OmNwwuE12",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a344",
"title": "Activity Recognition from RGB-D Camera with 3D Local Spatio-temporal Features",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a344/12OmNzVoBuk",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460793",
"title": "Single-frame hand gesture recognition using color and depth kernel descriptors",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460793/12OmNzlUKDr",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/05/07467541",
"title": "Super Normal Vector for Human Activity Recognition with Depth Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2017/05/07467541/13rRUwghd6e",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b095",
"title": "RGBD-Net: Predicting Color and Depth Images for Novel Views Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b095/1zWEdBgaMF2",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC1GueH",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzVoBFM",
"doi": "",
"title": "Combining contrast saliency and region discontinuity for precise hand segmentation in projector-camera system",
"normalizedTitle": "Combining contrast saliency and region discontinuity for precise hand segmentation in projector-camera system",
"abstract": "One goal of projector-camera system is let human finger be used like a mouse to click and drag objects in the projected content. It requires segmentation of the human palm and fingers in the image data captured by the camera, which is a challenging task in the presence of the incessant variation of the projected video content and the shadow cast by the palm and fingers. We describe a coarse-to-fine hand segmentation method for projector-camera system. After rough segmentation by contrast saliency detection and mean shift-based discontinuity-preserved smoothing, the refined result is confirmed through confidence evaluation. Extensive experimental results are shown to illustrate the accuracy and efficiency of the approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One goal of projector-camera system is let human finger be used like a mouse to click and drag objects in the projected content. It requires segmentation of the human palm and fingers in the image data captured by the camera, which is a challenging task in the presence of the incessant variation of the projected video content and the shadow cast by the palm and fingers. We describe a coarse-to-fine hand segmentation method for projector-camera system. After rough segmentation by contrast saliency detection and mean shift-based discontinuity-preserved smoothing, the refined result is confirmed through confidence evaluation. Extensive experimental results are shown to illustrate the accuracy and efficiency of the approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One goal of projector-camera system is let human finger be used like a mouse to click and drag objects in the projected content. It requires segmentation of the human palm and fingers in the image data captured by the camera, which is a challenging task in the presence of the incessant variation of the projected video content and the shadow cast by the palm and fingers. We describe a coarse-to-fine hand segmentation method for projector-camera system. After rough segmentation by contrast saliency detection and mean shift-based discontinuity-preserved smoothing, the refined result is confirmed through confidence evaluation. Extensive experimental results are shown to illustrate the accuracy and efficiency of the approach.",
"fno": "06460590",
"keywords": [
"Cameras",
"Image Segmentation",
"Object Detection",
"Region Discontinuity",
"Precise Hand Segmentation",
"Projector Camera System",
"Human Finger",
"Projected Content",
"Human Palm Segmentation",
"Finger Segmentation",
"Image Data Capturing",
"Projected Video Content",
"Coarse To Fine Hand Segmentation Method",
"Contrast Saliency Detection",
"Mean Shift Based Discontinuity Preserved Smoothing",
"Confidence Evaluation",
"Image Color Analysis",
"Image Segmentation",
"Cameras",
"Smoothing Methods",
"Skin",
"Humans",
"Human Computer Interaction"
],
"authors": [
{
"affiliation": "Department of Mechanical and Automation Engineering, The Chinese University of Hong Kong, Shatin, NT, Hong Kong",
"fullName": "Jingwen Dai",
"givenName": "Jingwen",
"surname": "Dai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechanical and Automation Engineering, The Chinese University of Hong Kong, Shatin, NT, Hong Kong",
"fullName": "Ronald Chung",
"givenName": "Ronald",
"surname": "Chung",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-11-01T00:00:00",
"pubType": "proceedings",
"pages": "2161-2164",
"year": "2012",
"issn": "1051-4651",
"isbn": "978-1-4673-2216-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06460589",
"articleId": "12OmNwDj1fE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06460591",
"articleId": "12OmNzcPAbx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2017/4822/0/07926707",
"title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177434",
"title": "Painted face effect removal by a projector-camera system with dynamic ambient light adaptability",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177434/12OmNqG0T4h",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06239197",
"title": "Making any planar surface into a touch-sensitive display by a mere projector and camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239197/12OmNqN6R9K",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2010/4147/0/4147a819",
"title": "New Interface Using Palm and Fingertip without Marker for Ubiquitous Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2010/4147a819/12OmNwtEENR",
"parentPublication": {
"id": "proceedings/icis/2010/4147/0",
"title": "Computer and Information Science, ACIS International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2012/4608/0/4608b285",
"title": "Research of Color Correction Algorithm for Multi-projector Screen Based on Projector-Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2012/4608b285/12OmNxwENpp",
"parentPublication": {
"id": "proceedings/isdea/2012/4608/0",
"title": "2012 Second International Conference on Intelligent System Design and Engineering Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mediacom/2010/4136/0/4136a083",
"title": "Latent Palmprint Image Segmentation Based on Dissimilarity Tolerance",
"doi": null,
"abstractUrl": "/proceedings-article/mediacom/2010/4136a083/12OmNyUWQZb",
"parentPublication": {
"id": "proceedings/mediacom/2010/4136/0",
"title": "2010 International Conference on Multimedia Communications (Mediacom 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699178",
"title": "A Single-Shot-Per-Pose Camera-Projector Calibration System for Imperfect Planar Targets",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699178/19F1O0IjR8k",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699280",
"title": "Precise Surface Color Estimation Using a Non-Diagonal Reflectance Matrix on an Adaptive Projector-Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699280/19F1RBpcB0s",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a261",
"title": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a261/1gysikN6QOQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdsba/2020/8164/0/816400a050",
"title": "TIP: Tangible Interactive Projector with Projection Touch Tracking and Spatial Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icdsba/2020/816400a050/1xeWoj58YyA",
"parentPublication": {
"id": "proceedings/icdsba/2020/8164/0",
"title": "2020 4th Annual International Conference on Data Science and Business Analytics (ICDSBA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwtn3tc",
"title": "2010 Fifth International Conference on Frontier of Computer Science and Technology (FCST 2010)",
"acronym": "fcst",
"groupId": "1001309",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzzxuta",
"doi": "10.1109/FCST.2010.103",
"title": "Skin-Anatomy Based Face Texture Image Synthesis by Skin Feature Distribution Analyzing Method",
"normalizedTitle": "Skin-Anatomy Based Face Texture Image Synthesis by Skin Feature Distribution Analyzing Method",
"abstract": "This paper investigates solutions to improvement of realistic rendering quality of CG (Computer Graphics) character face. In order to manipulate face skin feature distribution, based on bio-anatomic structure, skin surface is classified into 3 layers (Internal, Regular and Local layer) to control skin base color, surface cellular venation level and noise distribution like freckle, respectively. Moreover, for the feature best re-targeting from real human to virtual character, a new face segmentation method is introduced by analyzing and overlaying the appropriate feature to the deserved area. By doing all above, a specific transformation from real human face skin data to virtual character is proposed which is meaningful for the synthesis of CG texture.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper investigates solutions to improvement of realistic rendering quality of CG (Computer Graphics) character face. In order to manipulate face skin feature distribution, based on bio-anatomic structure, skin surface is classified into 3 layers (Internal, Regular and Local layer) to control skin base color, surface cellular venation level and noise distribution like freckle, respectively. Moreover, for the feature best re-targeting from real human to virtual character, a new face segmentation method is introduced by analyzing and overlaying the appropriate feature to the deserved area. By doing all above, a specific transformation from real human face skin data to virtual character is proposed which is meaningful for the synthesis of CG texture.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper investigates solutions to improvement of realistic rendering quality of CG (Computer Graphics) character face. In order to manipulate face skin feature distribution, based on bio-anatomic structure, skin surface is classified into 3 layers (Internal, Regular and Local layer) to control skin base color, surface cellular venation level and noise distribution like freckle, respectively. Moreover, for the feature best re-targeting from real human to virtual character, a new face segmentation method is introduced by analyzing and overlaying the appropriate feature to the deserved area. By doing all above, a specific transformation from real human face skin data to virtual character is proposed which is meaningful for the synthesis of CG texture.",
"fno": "05577329",
"keywords": [
"Avatars",
"Face Recognition",
"Feature Extraction",
"Image Segmentation",
"Image Texture",
"Skin",
"Skin Anatomy Based Face Texture Image Synthesis",
"Skin Feature Distribution Analyzing Method",
"Realistic Rendering Quality",
"Computer Graphics",
"Bio Anatomic Structure",
"Skin Surface",
"Surface Cellular Venation Level",
"Noise Distribution",
"Freckle",
"Face Segmentation",
"Virtual Character",
"Skin",
"Face",
"Image Color Analysis",
"Pixel",
"Muscles",
"Humans",
"Noise"
],
"authors": [
{
"affiliation": null,
"fullName": "Rui Guo",
"givenName": "Rui",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hiroki Takahashi",
"givenName": "Hiroki",
"surname": "Takahashi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fcst",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "136-141",
"year": "2010",
"issn": "2159-6301",
"isbn": "978-1-4244-7779-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05577301",
"articleId": "12OmNvUaNm2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05577325",
"articleId": "12OmNzDNtsZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmlc/2003/7865/5/01260131",
"title": "Real-time face detection based on skin-color model and morphology filters",
"doi": null,
"abstractUrl": "/proceedings-article/icmlc/2003/01260131/12OmNAQJzQj",
"parentPublication": {
"id": "proceedings/icmlc/2003/7865/1",
"title": "Proceedings of the 2003 International Conference on Machine Learning and Cybernetics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iat/2012/4880/3/4880c373",
"title": "Skin Segmentation Based on Human Face Illumination Feature",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2012/4880c373/12OmNAR1b0B",
"parentPublication": {
"id": "proceedings/wi-iat/2012/4880/1",
"title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/1/01394213",
"title": "On the importance of skin color for \"other-race\" effect",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394213/12OmNBpVPZy",
"parentPublication": {
"id": "proceedings/icme/2004/8603/1",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgec/2010/4281/0/4281a687",
"title": "Face Detection Based on Feature Analysis and Edge Detection against Skin Color-like Backgrounds",
"doi": null,
"abstractUrl": "/proceedings-article/icgec/2010/4281a687/12OmNCctfgS",
"parentPublication": {
"id": "proceedings/icgec/2010/4281/0",
"title": "Genetic and Evolutionary Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2008/3391/0/3391a457",
"title": "Multi-View Face Detection Based on AdaBoost and Skin Color",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2008/3391a457/12OmNqNG3dB",
"parentPublication": {
"id": "proceedings/icinis/2008/3391/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2015/9721/0/9721a663",
"title": "Fast Face Detection Based on Skin Segmentation and Facial Features",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2015/9721a663/12OmNrJAdRd",
"parentPublication": {
"id": "proceedings/sitis/2015/9721/0",
"title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2009/6325/0/05447254",
"title": "Face Detection Based on AdaBoost and Skin Color",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2009/05447254/12OmNxwENnr",
"parentPublication": {
"id": "proceedings/isise/2009/6325/0",
"title": "2009 Second International Symposium on Information Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdt/2009/3695/0/3695a059",
"title": "Skin Detection Using Contourlet-Based Texture Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icdt/2009/3695a059/12OmNyvoXkf",
"parentPublication": {
"id": "proceedings/icdt/2009/3695/0",
"title": "2009 Fourth International Conference on Digital Telecommunications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2016/9919/0/07459203",
"title": "Incorporating skin color for improved face detection and tracking system",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2016/07459203/12OmNzXWZJq",
"parentPublication": {
"id": "proceedings/ssiai/2016/9919/0",
"title": "2016 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150825",
"title": "FabSoften: Face Beautification via Dynamic Skin Smoothing, Guided Feathering, and Texture Restoration",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150825/1lPHrpmOT4Y",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "19wB16JGcSY",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"acronym": "mipr",
"groupId": "1825825",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "19wB1fUNWGk",
"doi": "10.1109/MIPR.2019.00095",
"title": "Garment Detectives: Discovering Clothes and Its Genre in Consumer Photos",
"normalizedTitle": "Garment Detectives: Discovering Clothes and Its Genre in Consumer Photos",
"abstract": "Clothing image analysis has shown its potential for use in a wide range of applications such as personalized clothing recommendation. Given a consumer photo, this paper addresses the problem of finding clothes and recognizing the genre of that clothes. This problem is very challenging due to large variations of uncontrolled realistic imaging conditions. To tackle these challenges, we formulate a novel framework by integrating local features of multimodality as the instances of the price-collecting Steiner tree (PCST) problem to discover clothing regions, and exploiting visual style elements to discover the clothing genre. The experimental results show that our fully automatic approach is effective to identify irregular shape of clothing region, and it significantly improves the accuracy of clothing genre recognition for images taken in unconstrained environment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Clothing image analysis has shown its potential for use in a wide range of applications such as personalized clothing recommendation. Given a consumer photo, this paper addresses the problem of finding clothes and recognizing the genre of that clothes. This problem is very challenging due to large variations of uncontrolled realistic imaging conditions. To tackle these challenges, we formulate a novel framework by integrating local features of multimodality as the instances of the price-collecting Steiner tree (PCST) problem to discover clothing regions, and exploiting visual style elements to discover the clothing genre. The experimental results show that our fully automatic approach is effective to identify irregular shape of clothing region, and it significantly improves the accuracy of clothing genre recognition for images taken in unconstrained environment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Clothing image analysis has shown its potential for use in a wide range of applications such as personalized clothing recommendation. Given a consumer photo, this paper addresses the problem of finding clothes and recognizing the genre of that clothes. This problem is very challenging due to large variations of uncontrolled realistic imaging conditions. To tackle these challenges, we formulate a novel framework by integrating local features of multimodality as the instances of the price-collecting Steiner tree (PCST) problem to discover clothing regions, and exploiting visual style elements to discover the clothing genre. The experimental results show that our fully automatic approach is effective to identify irregular shape of clothing region, and it significantly improves the accuracy of clothing genre recognition for images taken in unconstrained environment.",
"fno": "119800a471",
"keywords": [
"Clothing",
"Feature Extraction",
"Image Recognition",
"Visualization",
"Skin",
"Image Color Analysis",
"Shape",
"Clothing Image Analysis Detection And Recognition Clothing Genres Visual Style Elements"
],
"authors": [
{
"affiliation": null,
"fullName": "Shintami Chusnul Hidayati",
"givenName": "Shintami Chusnul",
"surname": "Hidayati",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kai-Lung Hua",
"givenName": "Kai-Lung",
"surname": "Hua",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yu Tsao",
"givenName": "Yu",
"surname": "Tsao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hong-Han Shuai",
"givenName": "Hong-Han",
"surname": "Shuai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jiaying Liu",
"givenName": "Jiaying",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wen-Huang Cheng",
"givenName": "Wen-Huang",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mipr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "471-474",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1198-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "119800a467",
"articleId": "19wB64tHc7C",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "119800a333",
"articleId": "19wB3BTJAbu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851b096",
"title": "DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851b096/12OmNqJq4uU",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2016/5670/0/5670a436",
"title": "A Study on the Clothes Recommendation for a Cold Region of Japan",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670a436/12OmNvonIGI",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169791",
"title": "Stripe based clothes segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169791/12OmNwpoFyH",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2014/4258/0/4258a327",
"title": "Learning to Annotate Clothes in Everyday Photos: Multi-modal, Multi-label, Multi-instance Approach",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2014/4258a327/12OmNy3AgBC",
"parentPublication": {
"id": "proceedings/sibgrapi/2014/4258/0",
"title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460874",
"title": "Learning non-target items for interesting clothes segmentation in fashion images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460874/12OmNzZEAq5",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a550",
"title": "Clothes Keypoints Localization and Attribute Recognition via Prior Knowledge",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a550/1cdOTNMgmv6",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/06/08936426",
"title": "Person Re-Identification by Contour Sketch Under Moderate Clothing Change",
"doi": null,
"abstractUrl": "/journal/tp/2021/06/08936426/1fRz11hD8Tm",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412002",
"title": "UCCTGAN: Unsupervised Clothing Color Transformation Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412002/1tmiQLgudzy",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900d930",
"title": "IndoFashion : Apparel Classification for Indian Ethnic Clothes",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900d930/1yVzONYyUPS",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900q6923",
"title": "Disentangled Cycle Consistency for Highly-realistic Virtual Try-On",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900q6923/1yeLBaa0gzC",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1M4rj9heeBi",
"title": "2022 12th International Conference on Information Technology in Medicine and Education (ITME)",
"acronym": "itme",
"groupId": "1002567",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1M4rv5Veb0k",
"doi": "10.1109/ITME56794.2022.00047",
"title": "Personalized Custom Virtual Fitting Display Method",
"normalizedTitle": "Personalized Custom Virtual Fitting Display Method",
"abstract": "With the maturity of 3D and AR technologies, many e-commerce platforms have launched the function of virtual fitting rooms. Although it attracted a lot of attention, the real experience is not good. At present, the main problems of many virtual fitting platforms are inaccurate in clothing version and size, and the dressing effect is quite different. Therefore, the virtual fitting platform to customize personalized clothing has potential application value. In this study, we consider trying on different clothing on a detailed 3D mannequin to increase the experience. First, a human body database is established, and then the fine faces are obtained according to the 3D face scanning technology. According to the synthesis technology of human body and human face, a virtual human body with fine facial features is obtained. Finally, the different fabrics clothes can try on the virtual body to show the effect. The experimental results show that this method can quickly try on clothes of different fabrics to complete personalized customization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the maturity of 3D and AR technologies, many e-commerce platforms have launched the function of virtual fitting rooms. Although it attracted a lot of attention, the real experience is not good. At present, the main problems of many virtual fitting platforms are inaccurate in clothing version and size, and the dressing effect is quite different. Therefore, the virtual fitting platform to customize personalized clothing has potential application value. In this study, we consider trying on different clothing on a detailed 3D mannequin to increase the experience. First, a human body database is established, and then the fine faces are obtained according to the 3D face scanning technology. According to the synthesis technology of human body and human face, a virtual human body with fine facial features is obtained. Finally, the different fabrics clothes can try on the virtual body to show the effect. The experimental results show that this method can quickly try on clothes of different fabrics to complete personalized customization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the maturity of 3D and AR technologies, many e-commerce platforms have launched the function of virtual fitting rooms. Although it attracted a lot of attention, the real experience is not good. At present, the main problems of many virtual fitting platforms are inaccurate in clothing version and size, and the dressing effect is quite different. Therefore, the virtual fitting platform to customize personalized clothing has potential application value. In this study, we consider trying on different clothing on a detailed 3D mannequin to increase the experience. First, a human body database is established, and then the fine faces are obtained according to the 3D face scanning technology. According to the synthesis technology of human body and human face, a virtual human body with fine facial features is obtained. Finally, the different fabrics clothes can try on the virtual body to show the effect. The experimental results show that this method can quickly try on clothes of different fabrics to complete personalized customization.",
"fno": "101500a180",
"keywords": [
"Clothing",
"Electronic Commerce",
"Face Recognition",
"Virtual Reality",
"3 D Face Scanning Technology",
"Clothing Version",
"Detailed 3 D Mannequin",
"Different Clothing",
"Different Fabrics Clothes",
"E Commerce Platforms",
"Human Body Database",
"Human Face",
"Personalized Clothing",
"Personalized Custom Virtual Fitting Display Method",
"Personalized Customization",
"Synthesis Technology",
"Virtual Body",
"Virtual Fitting Platform",
"Virtual Fitting Rooms",
"Virtual Human Body",
"Three Dimensional Displays",
"Databases",
"Fitting",
"Clothing",
"Education",
"Fabrics",
"Electronic Commerce",
"Virtual Fitting",
"Human Model",
"Displaying Method"
],
"authors": [
{
"affiliation": "Huanghuai University,College of Computer and Artificial Intelligence,Zhumadian,China",
"fullName": "Yuxiang Zhu",
"givenName": "Yuxiang",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huanghuai University,College of Computer and Artificial Intelligence,Zhumadian,China",
"fullName": "Haitao Wu",
"givenName": "Haitao",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huanghuai University,College of Computer and Artificial Intelligence,Zhumadian,China",
"fullName": "Gangqiang Li",
"givenName": "Gangqiang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huanghuai University,College of Computer and Artificial Intelligence,Zhumadian,China",
"fullName": "Jinfeng Gao",
"givenName": "Jinfeng",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huanghuai University,College of Computer and Artificial Intelligence,Zhumadian,China",
"fullName": "Shuan Liu",
"givenName": "Shuan",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huanghuai University,College of Computer and Artificial Intelligence,Zhumadian,China",
"fullName": "Yu Zhang",
"givenName": "Yu",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huanghuai University,College of Computer and Artificial Intelligence,Zhumadian,China",
"fullName": "Junming Zhang",
"givenName": "Junming",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "itme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-11-01T00:00:00",
"pubType": "proceedings",
"pages": "180-183",
"year": "2022",
"issn": null,
"isbn": "979-8-3503-1015-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "101500a175",
"articleId": "1M4rnXUWFfG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "101500a184",
"articleId": "1M4rx6z7teM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/apsitt/2005/216/0/01593450",
"title": "A Study on Real-time Virtual Clothing system based on Two-Dimensional Plane Model",
"doi": null,
"abstractUrl": "/proceedings-article/apsitt/2005/01593450/12OmNrkT7O4",
"parentPublication": {
"id": "proceedings/apsitt/2005/216/0",
"title": "6th Asia-Pacific Symposium on Information and Telecommunication Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/niss/2009/3687/0/3687a139",
"title": "Online Cloth Virtual Fitting Room Based on a Local Cluster",
"doi": null,
"abstractUrl": "/proceedings-article/niss/2009/3687a139/12OmNwJPMVH",
"parentPublication": {
"id": "proceedings/niss/2009/3687/0",
"title": "2009 International Conference on New Trends in Information and Service Science (NISS 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scset/2022/7876/0/787600a199",
"title": "Research and Prospect of 3D Virtual Display Technology of Clothing",
"doi": null,
"abstractUrl": "/proceedings-article/scset/2022/787600a199/1ANLZahi1sQ",
"parentPublication": {
"id": "proceedings/scset/2022/7876/0",
"title": "2022 International Seminar on Computer Science and Engineering Technology (SCSET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3219",
"title": "M3D-VTON: A Monocular-to-3D Virtual Try-On Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3219/1BmI18LoUMg",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900c230",
"title": "Dress Code: High-Resolution Multi-Category Virtual Try-On",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900c230/1G56G2vyxTa",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900c235",
"title": "Towards Detailed Characteristic-Preserving Virtual Try-On",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900c235/1G57e7wHcFq",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600k0789",
"title": "ClothFormer: Taming Video Virtual Try-on in All Module",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600k0789/1H0KOWx8zPa",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aemcse/2022/8474/0/847400a267",
"title": "Research on the evaluation method of virtual clothing pressure comfort based on fuzzy clustering",
"doi": null,
"abstractUrl": "/proceedings-article/aemcse/2022/847400a267/1IlNVrVGCUU",
"parentPublication": {
"id": "proceedings/aemcse/2022/8474/0",
"title": "2022 5th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2022/06/10017425",
"title": "VCFN: Virtual Cloth Fitting Try-On Network",
"doi": null,
"abstractUrl": "/magazine/it/2022/06/10017425/1JYZAiOXfpK",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900o4126",
"title": "VITON-HD: High-Resolution Virtual Try-On via Misalignment-Aware Normalization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900o4126/1yeLalrLSmY",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCmpcNk",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBRsVxy",
"doi": "10.1109/VISUAL.2005.1532855",
"title": "Illustrative display of hidden iso-surface structures",
"normalizedTitle": "Illustrative display of hidden iso-surface structures",
"abstract": "Indirect volume rendering is a widespread method for the display of volume datasets. It is based on the extraction of polygonal iso-surfaces from volumetric data, which are then rendered using conventional rasterization methods. Whereas this rendering approach is fast and relatively easy to implement, it cannot easily provide an understandable display of structures occluded by the directly visible iso-surface. Simple approaches like alpha-blending for transparency when drawing the iso-surface often generate a visually complex output, which is difficult to interpret. Moreover, such methods can significantly increase the computational complexity of the rendering process. In this paper, we therefore propose a new approach for the illustrative indirect rendering of volume data in real-time. This algorithm emphasizes the silhouette of objects represented by the iso-surface. Additionally, shading intensities on objects are reproduced with a monochrome hatching technique. Using a specially designed two-pass rendering process, structures behind the front layer of the iso-surface are automatically extracted with a depth peeling method. The shapes of these hidden structures are also displayed as silhouette outlines. As an additional option, the geometry of explicitly specified inner objects can be displayed with constant translucency. Although these inner objects always remain visible, a specific shading and depth attenuation method is used to convey the depth relationships. We describe the implementation of the algorithm, which exploits the programmability of state-of-the-art graphics processing units (GPUs). The algorithm described in this paper does not require any preprocessing of the input data or a manual definition of inner structures. Since the presented method works on iso-surfaces, which are stored as polygonal datasets, it can also be applied to other types of polygonal models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Indirect volume rendering is a widespread method for the display of volume datasets. It is based on the extraction of polygonal iso-surfaces from volumetric data, which are then rendered using conventional rasterization methods. Whereas this rendering approach is fast and relatively easy to implement, it cannot easily provide an understandable display of structures occluded by the directly visible iso-surface. Simple approaches like alpha-blending for transparency when drawing the iso-surface often generate a visually complex output, which is difficult to interpret. Moreover, such methods can significantly increase the computational complexity of the rendering process. In this paper, we therefore propose a new approach for the illustrative indirect rendering of volume data in real-time. This algorithm emphasizes the silhouette of objects represented by the iso-surface. Additionally, shading intensities on objects are reproduced with a monochrome hatching technique. Using a specially designed two-pass rendering process, structures behind the front layer of the iso-surface are automatically extracted with a depth peeling method. The shapes of these hidden structures are also displayed as silhouette outlines. As an additional option, the geometry of explicitly specified inner objects can be displayed with constant translucency. Although these inner objects always remain visible, a specific shading and depth attenuation method is used to convey the depth relationships. We describe the implementation of the algorithm, which exploits the programmability of state-of-the-art graphics processing units (GPUs). The algorithm described in this paper does not require any preprocessing of the input data or a manual definition of inner structures. Since the presented method works on iso-surfaces, which are stored as polygonal datasets, it can also be applied to other types of polygonal models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Indirect volume rendering is a widespread method for the display of volume datasets. It is based on the extraction of polygonal iso-surfaces from volumetric data, which are then rendered using conventional rasterization methods. Whereas this rendering approach is fast and relatively easy to implement, it cannot easily provide an understandable display of structures occluded by the directly visible iso-surface. Simple approaches like alpha-blending for transparency when drawing the iso-surface often generate a visually complex output, which is difficult to interpret. Moreover, such methods can significantly increase the computational complexity of the rendering process. In this paper, we therefore propose a new approach for the illustrative indirect rendering of volume data in real-time. This algorithm emphasizes the silhouette of objects represented by the iso-surface. Additionally, shading intensities on objects are reproduced with a monochrome hatching technique. Using a specially designed two-pass rendering process, structures behind the front layer of the iso-surface are automatically extracted with a depth peeling method. The shapes of these hidden structures are also displayed as silhouette outlines. As an additional option, the geometry of explicitly specified inner objects can be displayed with constant translucency. Although these inner objects always remain visible, a specific shading and depth attenuation method is used to convey the depth relationships. We describe the implementation of the algorithm, which exploits the programmability of state-of-the-art graphics processing units (GPUs). The algorithm described in this paper does not require any preprocessing of the input data or a manual definition of inner structures. Since the presented method works on iso-surfaces, which are stored as polygonal datasets, it can also be applied to other types of polygonal models.",
"fno": "01532855",
"keywords": [
"Object Detection",
"Surface Fitting",
"Rendering Computer Graphics",
"Computational Geometry",
"Data Visualisation",
"Hidden Feature Removal",
"Indirect Volume Rendering",
"Hidden Iso Surface Structures",
"Rasterization Methods",
"Alpha Blending",
"Object Silhouette",
"Monochrome Hatching Technique",
"Depth Peeling Method",
"Depth Attenuation Method",
"Graphics Processing Units",
"Polygonal Datasets",
"Illustrative Display",
"Displays",
"Rendering Computer Graphics",
"Geometry",
"Biomedical Imaging",
"Shape",
"Data Mining",
"Computer Graphics",
"Engines",
"Image Generation",
"Isosurfaces"
],
"authors": [
{
"affiliation": "Visual Comput. for Medicine, Tubingen Univ., Germany",
"fullName": "J. Fischer",
"givenName": "J.",
"surname": "Fischer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Visual Comput. for Medicine, Tubingen Univ., Germany",
"fullName": "D. Bartz",
"givenName": "D.",
"surname": "Bartz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "W. Strasser",
"givenName": "W.",
"surname": "Strasser",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-01-01T00:00:00",
"pubType": "proceedings",
"pages": "663,664,665,666,667,668,669,670",
"year": "2005",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "27660070",
"articleId": "12OmNxeusY2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "27660071",
"articleId": "12OmNyyeWvl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2005/2766/0/27660084",
"title": "Illustrative Display of Hidden Iso-Surface Structures",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660084/12OmNCdBDHh",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbmsys/1990/9040/0/00109377",
"title": "Dynamic detection of hidden-surfaces using a MIMD multiprocessor",
"doi": null,
"abstractUrl": "/proceedings-article/cbmsys/1990/00109377/12OmNvjgWpA",
"parentPublication": {
"id": "proceedings/cbmsys/1990/9040/0",
"title": "1990 Proceedings Third Annual IEEE Symposium on Computer-Based Medical Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156372",
"title": "Computation-to-core mapping strategies for iso-surface volume rendering on GPUs",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156372/12OmNwkzulc",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1994/6627/0/00346306",
"title": "Nonpolygonal isosurface rendering for large volume datasets",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1994/00346306/12OmNx8Ouv6",
"parentPublication": {
"id": "proceedings/visual/1994/6627/0",
"title": "Proceedings Visualization '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1990/2083/0/00146401",
"title": "Accurate display of tensor product isosurfaces",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1990/00146401/12OmNyfvpQC",
"parentPublication": {
"id": "proceedings/visual/1990/2083/0",
"title": "1990 First IEEE Conference on Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pbg/2005/20/0/01500313",
"title": "High-quality surface splatting on today's GPUs",
"doi": null,
"abstractUrl": "/proceedings-article/pbg/2005/01500313/12OmNzdoMWS",
"parentPublication": {
"id": "proceedings/pbg/2005/20/0",
"title": "Point-Based Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050838",
"title": "Smart Transparency for Illustrative Visualization of Complex Flow Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050838/13rRUxC0SvU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061329",
"title": "Illustrative Stream Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061329/13rRUxcsYLM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g068",
"title": "Differentiable Surface Rendering via Non-Differentiable Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g068/1BmFpmQFMKA",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csii-bcd/2017/3302/0/3302a190",
"title": "Depth Recognition in 3D Translucent Stereoscopic Imaging of Medical Volumes by Means of a Glasses-Free 3D Display",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csii-bcd/2017/3302a190/1cdOB3HCeTm",
"parentPublication": {
"id": "proceedings/acit-csii-bcd/2017/3302/0",
"title": "2017 5th Intl Conf on Applied Computing and Information Technology/4th Intl Conf on Computational Science/Intelligence and Applied Informatics/2nd Intl Conf on Big Data, Cloud Computing, Data Science (ACIT-CSII-BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBTawmY",
"title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images",
"acronym": "sibgrapi",
"groupId": "1000131",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCctfoC",
"doi": "10.1109/SIBGRAPI.2012.27",
"title": "Memory-Efficient Order-Independent Transparency with Dynamic Fragment Buffer",
"normalizedTitle": "Memory-Efficient Order-Independent Transparency with Dynamic Fragment Buffer",
"abstract": "Order-independent transparency (OIT) rendering is computationally intensive due to required sorting and sufficient memory to store fragments before sorting. We present Dynamic Fragment Buffer, a revamped two-pass OIT rendering technique, which performs correct blending of a large number of transparent layers at interactive frame rates. Our approach self-adjusts memory allocation to handle a variable number of fragments per pixel without wasting memory. In this paper we perform a detailed analysis of several design decisions which lead to this technique. We present a collection of experiments that illustrate the advantages of our technique with respect to others OIT algorithms in the literature.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Order-independent transparency (OIT) rendering is computationally intensive due to required sorting and sufficient memory to store fragments before sorting. We present Dynamic Fragment Buffer, a revamped two-pass OIT rendering technique, which performs correct blending of a large number of transparent layers at interactive frame rates. Our approach self-adjusts memory allocation to handle a variable number of fragments per pixel without wasting memory. In this paper we perform a detailed analysis of several design decisions which lead to this technique. We present a collection of experiments that illustrate the advantages of our technique with respect to others OIT algorithms in the literature.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Order-independent transparency (OIT) rendering is computationally intensive due to required sorting and sufficient memory to store fragments before sorting. We present Dynamic Fragment Buffer, a revamped two-pass OIT rendering technique, which performs correct blending of a large number of transparent layers at interactive frame rates. Our approach self-adjusts memory allocation to handle a variable number of fragments per pixel without wasting memory. In this paper we perform a detailed analysis of several design decisions which lead to this technique. We present a collection of experiments that illustrate the advantages of our technique with respect to others OIT algorithms in the literature.",
"fno": "4829a134",
"keywords": [
"Geometry",
"Memory Management",
"Rendering Computer Graphics",
"Graphics Processing Units",
"Sorting",
"Buffer Storage",
"Indexes",
"Order Independent Transparency"
],
"authors": [
{
"affiliation": null,
"fullName": "Marilena Maule",
"givenName": "Marilena",
"surname": "Maule",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Joao L. D. Comba",
"givenName": "Joao L. D.",
"surname": "Comba",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rafael Torchelsen",
"givenName": "Rafael",
"surname": "Torchelsen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rui Bastos",
"givenName": "Rui",
"surname": "Bastos",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sibgrapi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-08-01T00:00:00",
"pubType": "proceedings",
"pages": "134-141",
"year": "2012",
"issn": "1530-1834",
"isbn": "978-1-4673-2802-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4829a126",
"articleId": "12OmNzIUfWI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4829a142",
"articleId": "12OmNBhZ4qF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2016/2303/0/2303a057",
"title": "Interactive Screenspace Stream-Compaction Fragment Rendering of Direct Illumination from Area Lights",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2016/2303a057/12OmNCdk2W8",
"parentPublication": {
"id": "proceedings/cw/2016/2303/0",
"title": "2016 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2009/3872/0/3872b221",
"title": "Order Independent Incremental Evolving Fuzzy Grammar Fragment Learner",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2009/3872b221/12OmNzG4gty",
"parentPublication": {
"id": "proceedings/isda/2009/3872/0",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1998/9176/0/91760351",
"title": "Pixel Masks for Screen-Door Transparency",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1998/91760351/12OmNzYwc4V",
"parentPublication": {
"id": "proceedings/ieee-vis/1998/9176/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/06/07070744",
"title": "Z_$k^+$_Z -buffer: An Efficient, Memory-Friendly and Dynamic Z_$k$_Z -buffer Framework",
"doi": null,
"abstractUrl": "/journal/tg/2015/06/07070744/13rRUwdrdSB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/05/v0584",
"title": "Interactive Transparency Rendering for Large CAD Models",
"doi": null,
"abstractUrl": "/journal/tg/2005/05/v0584/13rRUwhpBO0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/08/ttg2011081036",
"title": "Stochastic Transparency",
"doi": null,
"abstractUrl": "/journal/tg/2011/08/ttg2011081036/13rRUxBa55X",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050838",
"title": "Smart Transparency for Illustrative Visualization of Complex Flow Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050838/13rRUxC0SvU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/03/v0285",
"title": "Hardware-Assisted Visibility Sorting for Unstructured Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2005/03/v0285/13rRUxOdD89",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/02/ttg2014020238",
"title": "Memory-Hazard-Aware K-Buffer Algorithm for Order-Independent Transparency Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2014/02/ttg2014020238/13rRUypp57F",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/08/09007507",
"title": "A Comparison of Rendering Techniques for 3D Line Sets With Transparency",
"doi": null,
"abstractUrl": "/journal/tg/2021/08/09007507/1hJKlGGBnpu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBqv2pP",
"doi": "10.1109/ICCV.2017.586",
"title": "Non-rigid Object Tracking via Deformable Patches Using Shape-Preserved KCF and Level Sets",
"normalizedTitle": "Non-rigid Object Tracking via Deformable Patches Using Shape-Preserved KCF and Level Sets",
"abstract": "Part-based trackers are effective in exploiting local details of the target object for robust tracking. In contrast to most existing part-based methods that divide all kinds of target objects into a number of fixed rectangular patches, in this paper, we propose a novel framework in which a set of deformable patches dynamically collaborate on tracking of non-rigid objects. In particular, we proposed a shape-preserved kernelized correlation filter (SP-KCF) which can accommodate target shape information for robust tracking. The SP-KCF is introduced into the level set framework for dynamic tracking of individual patches. In this manner, our proposed deformable patches are target-dependent, have the capability to assume complex topology, and are deformable to adapt to target variations. As these deformable patches properly capture individual target subregions, we exploit their photometric discrimination and shape variation to reveal the trackability of individual target subregions, which enables the proposed tracker to dynamically take advantage of those subregions with good trackability for target likelihood estimation. Finally the shape information of these deformable patches enables accurate object contours to be computed as the tracking output. Experimental results on the latest public sets of challenging sequences demonstrate the effectiveness of the proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Part-based trackers are effective in exploiting local details of the target object for robust tracking. In contrast to most existing part-based methods that divide all kinds of target objects into a number of fixed rectangular patches, in this paper, we propose a novel framework in which a set of deformable patches dynamically collaborate on tracking of non-rigid objects. In particular, we proposed a shape-preserved kernelized correlation filter (SP-KCF) which can accommodate target shape information for robust tracking. The SP-KCF is introduced into the level set framework for dynamic tracking of individual patches. In this manner, our proposed deformable patches are target-dependent, have the capability to assume complex topology, and are deformable to adapt to target variations. As these deformable patches properly capture individual target subregions, we exploit their photometric discrimination and shape variation to reveal the trackability of individual target subregions, which enables the proposed tracker to dynamically take advantage of those subregions with good trackability for target likelihood estimation. Finally the shape information of these deformable patches enables accurate object contours to be computed as the tracking output. Experimental results on the latest public sets of challenging sequences demonstrate the effectiveness of the proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Part-based trackers are effective in exploiting local details of the target object for robust tracking. In contrast to most existing part-based methods that divide all kinds of target objects into a number of fixed rectangular patches, in this paper, we propose a novel framework in which a set of deformable patches dynamically collaborate on tracking of non-rigid objects. In particular, we proposed a shape-preserved kernelized correlation filter (SP-KCF) which can accommodate target shape information for robust tracking. The SP-KCF is introduced into the level set framework for dynamic tracking of individual patches. In this manner, our proposed deformable patches are target-dependent, have the capability to assume complex topology, and are deformable to adapt to target variations. As these deformable patches properly capture individual target subregions, we exploit their photometric discrimination and shape variation to reveal the trackability of individual target subregions, which enables the proposed tracker to dynamically take advantage of those subregions with good trackability for target likelihood estimation. Finally the shape information of these deformable patches enables accurate object contours to be computed as the tracking output. Experimental results on the latest public sets of challenging sequences demonstrate the effectiveness of the proposed method.",
"fno": "1032f496",
"keywords": [
"Feature Extraction",
"Filtering Theory",
"Image Representation",
"Image Segmentation",
"Image Sequences",
"Maximum Likelihood Estimation",
"Object Detection",
"Object Tracking",
"Target Tracking",
"Tracking Output",
"Target Likelihood Estimation",
"Individual Target Subregions",
"Target Variations",
"Individual Patches",
"Dynamic Tracking",
"Level Set Framework",
"Target Shape Information",
"SP KCF",
"Nonrigid Objects",
"Fixed Rectangular Patches",
"Robust Tracking",
"Level Sets",
"Deformable Patches",
"Nonrigid Object Tracking",
"Target Tracking",
"Level Set",
"Shape",
"Robustness",
"Adaptation Models",
"Topology",
"Correlation"
],
"authors": [
{
"affiliation": null,
"fullName": "Xin Sun",
"givenName": "Xin",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ngai-Man Cheung",
"givenName": "Ngai-Man",
"surname": "Cheung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hongxun Yao",
"givenName": "Hongxun",
"surname": "Yao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yiluan Guo",
"givenName": "Yiluan",
"surname": "Guo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "5496-5504",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032f487",
"articleId": "12OmNCctf7u",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032f505",
"articleId": "12OmNwogh3Y",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2009/4420/0/05459276",
"title": "Adaptive fragments-based tracking of non-rigid objects using level sets",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459276/12OmNBTs7we",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2014/7434/0/7434a260",
"title": "Tracking Non-rigid Object Using Discriminative Features",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2014/7434a260/12OmNrkBwxO",
"parentPublication": {
"id": "proceedings/cis/2014/7434/0",
"title": "2014 Tenth International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209c179",
"title": "Real-Time Tracking via Deformable Structure Regression Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c179/12OmNvjgWzm",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b002",
"title": "Multiphase Image Segmentation Using the Deformable Simplicial Complex Method",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b002/12OmNxWLTqW",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccbd/2016/3555/0/3555a248",
"title": "A Robust Appearance Model for Object Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ccbd/2016/3555a248/12OmNxuXcvS",
"parentPublication": {
"id": "proceedings/ccbd/2016/3555/0",
"title": "2016 7th International Conference on Cloud Computing and Big Data (CCBD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118c917",
"title": "Single Image Super-resolution Using Deformable Patches",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c917/12OmNyfdOSd",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/03/07516689",
"title": "Dynamic Projection Mapping onto Deforming Non-Rigid Surface Using Deformable Dot Cluster Marker",
"doi": null,
"abstractUrl": "/journal/tg/2017/03/07516689/13rRUwdIOUR",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050852",
"title": "Spring Level Sets: A Deformable Model Representation to Provide Interoperability between Meshes and Level Sets",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050852/13rRUwfZBVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpbd&is/2019/0466/0/08735493",
"title": "A Multi-Patch Network for Non-Rigid Object Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/hpbd&is/2019/08735493/1aPuQy42GXu",
"parentPublication": {
"id": "proceedings/hpbd&is/2019/0466/0",
"title": "2019 International Conference on High Performance Big Data and Intelligent Systems (HPBD&IS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800g727",
"title": "Deformable Siamese Attention Networks for Visual Object Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800g727/1m3nuHWHIGY",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAS9zxg",
"title": "2014 IEEE 27th International Symposium on Computer-Based Medical Systems (CBMS)",
"acronym": "cbms",
"groupId": "1000153",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqIhFK6",
"doi": "10.1109/CBMS.2014.76",
"title": "Patient-Specific Interactive Simulation of Compression Ultrasonography",
"normalizedTitle": "Patient-Specific Interactive Simulation of Compression Ultrasonography",
"abstract": "We are developing an ultrasonography training system that promises to accelerate the broader use of ultrasound imaging in healthcare. Aiming at cheaper, more efficient, and more effective ultrasound training, a key feature of our system is the real-time, interactive simulation of a 3D virtual patient that, unlike conventional, purely geometric models of the human body, includes deformable soft tissues. Since soft-tissue deformation is an important factor in the clinical practice of ultrasound imaging, our objective in this paper is to incorporate real-time interactive soft tissue mechanics simulation into our 3D patient model. To this end, we adapt and evaluate two well-known deformable model simulation methods-mass-spring-damper systems and the finite element method-and we apply these methods to the simulation of ultrasound imaging in soft tissues, obtaining promising results on a multicore laptop computer.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We are developing an ultrasonography training system that promises to accelerate the broader use of ultrasound imaging in healthcare. Aiming at cheaper, more efficient, and more effective ultrasound training, a key feature of our system is the real-time, interactive simulation of a 3D virtual patient that, unlike conventional, purely geometric models of the human body, includes deformable soft tissues. Since soft-tissue deformation is an important factor in the clinical practice of ultrasound imaging, our objective in this paper is to incorporate real-time interactive soft tissue mechanics simulation into our 3D patient model. To this end, we adapt and evaluate two well-known deformable model simulation methods-mass-spring-damper systems and the finite element method-and we apply these methods to the simulation of ultrasound imaging in soft tissues, obtaining promising results on a multicore laptop computer.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We are developing an ultrasonography training system that promises to accelerate the broader use of ultrasound imaging in healthcare. Aiming at cheaper, more efficient, and more effective ultrasound training, a key feature of our system is the real-time, interactive simulation of a 3D virtual patient that, unlike conventional, purely geometric models of the human body, includes deformable soft tissues. Since soft-tissue deformation is an important factor in the clinical practice of ultrasound imaging, our objective in this paper is to incorporate real-time interactive soft tissue mechanics simulation into our 3D patient model. To this end, we adapt and evaluate two well-known deformable model simulation methods-mass-spring-damper systems and the finite element method-and we apply these methods to the simulation of ultrasound imaging in soft tissues, obtaining promising results on a multicore laptop computer.",
"fno": "4435a113",
"keywords": [
"Ultrasonic Imaging",
"Skin",
"Probes",
"Deformable Models",
"Finite Element Analysis",
"Springs",
"Computational Modeling",
"FEM",
"Ultrasound Training",
"Ultrasound Simulation",
"Interactive Simulation",
"Deformable Models",
"MSDS"
],
"authors": [
{
"affiliation": null,
"fullName": "Kresimir Petrinec",
"givenName": "Kresimir",
"surname": "Petrinec",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Eric Savitsky",
"givenName": "Eric",
"surname": "Savitsky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Demetri Terzopoulos",
"givenName": "Demetri",
"surname": "Terzopoulos",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cbms",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-05-01T00:00:00",
"pubType": "proceedings",
"pages": "113-118",
"year": "2014",
"issn": "2372-9198",
"isbn": "978-1-4799-4435-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4435a109",
"articleId": "12OmNrAdsvw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4435a119",
"articleId": "12OmNx4Q6zi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2017/4822/0/07926680",
"title": "Ultrasound Tracking Using ProbeSight: Camera Pose Estimation Relative to External Anatomy by Inverse Rendering of a Prior High-Resolution 3D Surface Map",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926680/12OmNBp52xs",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2017/1324/0/132401a223",
"title": "Augmenting a Wireless Portable Ultrasound Imaging with a real-time Hemodynamics Solver",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2017/132401a223/12OmNCwlagj",
"parentPublication": {
"id": "proceedings/bibe/2017/1324/0",
"title": "2017 IEEE 17th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/memea/2009/3598/0/05167954",
"title": "A basic study of ultrasonic shear wave elastography in tissue-mimicking phantoms",
"doi": null,
"abstractUrl": "/proceedings-article/memea/2009/05167954/12OmNqIzgTb",
"parentPublication": {
"id": "proceedings/memea/2009/3598/0",
"title": "Medical Measurement and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761741",
"title": "Real-time update of 3D deformable models for computer aided liver surgery",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761741/12OmNrMHOmG",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050852",
"title": "Spring Level Sets: A Deformable Model Representation to Provide Interoperability between Meshes and Level Sets",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050852/13rRUwfZBVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/mc/2015/02/07312456",
"title": "Asthma Pattern Identification via Continuous Diaphragm Motion Monitoring",
"doi": null,
"abstractUrl": "/journal/mc/2015/02/07312456/13rRUxlgxPM",
"parentPublication": {
"id": "trans/mc",
"title": "IEEE Transactions on Multi-Scale Computing Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2019/9245/0/924500a262",
"title": "3D Ultrasound Imaging of Scoliosis with Force-Sensitive Robotic Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2019/924500a262/18M7gn48n4c",
"parentPublication": {
"id": "proceedings/irc/2019/9245/0",
"title": "2019 Third IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2022/8487/0/848700a045",
"title": "Multi-modal lung ultrasound image classification by fusing image-based features and probe information",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2022/848700a045/1J6hEfq64k8",
"parentPublication": {
"id": "proceedings/bibe/2022/8487/0",
"title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2020/9574/0/957400a785",
"title": "Imaging carotid wall mechanical heterogeneity in ultrasound image sequences using Eulerian video magnification",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2020/957400a785/1pBMnmfBBf2",
"parentPublication": {
"id": "proceedings/bibe/2020/9574/0",
"title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2020/9574/0/957400a637",
"title": "Shear wave elastography in ex vivo and in vivo skin using high-frequency ultrasound imaging",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2020/957400a637/1pBMrn9ghEs",
"parentPublication": {
"id": "proceedings/bibe/2020/9574/0",
"title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNA0MYZb",
"title": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwI8cha",
"doi": "10.1109/CVPR.2008.4587475",
"title": "A multi-compartment segmentation framework with homeomorphic level sets",
"normalizedTitle": "A multi-compartment segmentation framework with homeomorphic level sets",
"abstract": "The simultaneous segmentation of multiple objects is an important problem in many imaging and computer vision applications. Various extensions of level set segmentation techniques to multiple objects have been proposed; however, no one method maintains object relationships, preserves topology, is computationally efficient, and provides an object-dependent internal and external force capability. In this paper, a framework for segmenting multiple objects that permits different forces to be applied to different boundaries while maintaining object topology and relationships is presented. Because of this framework, the segmentation of multiple objects each with multiple compartments is supported, and no overlaps or vacuums are generated. The computational complexity of this approach is independent of the number of objects to segment, thereby permitting the simultaneous segmentation of a large number of components. The properties of this approach and comparisons to existing methods are shown using a variety of images, both synthetic and real.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The simultaneous segmentation of multiple objects is an important problem in many imaging and computer vision applications. Various extensions of level set segmentation techniques to multiple objects have been proposed; however, no one method maintains object relationships, preserves topology, is computationally efficient, and provides an object-dependent internal and external force capability. In this paper, a framework for segmenting multiple objects that permits different forces to be applied to different boundaries while maintaining object topology and relationships is presented. Because of this framework, the segmentation of multiple objects each with multiple compartments is supported, and no overlaps or vacuums are generated. The computational complexity of this approach is independent of the number of objects to segment, thereby permitting the simultaneous segmentation of a large number of components. The properties of this approach and comparisons to existing methods are shown using a variety of images, both synthetic and real.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The simultaneous segmentation of multiple objects is an important problem in many imaging and computer vision applications. Various extensions of level set segmentation techniques to multiple objects have been proposed; however, no one method maintains object relationships, preserves topology, is computationally efficient, and provides an object-dependent internal and external force capability. In this paper, a framework for segmenting multiple objects that permits different forces to be applied to different boundaries while maintaining object topology and relationships is presented. Because of this framework, the segmentation of multiple objects each with multiple compartments is supported, and no overlaps or vacuums are generated. The computational complexity of this approach is independent of the number of objects to segment, thereby permitting the simultaneous segmentation of a large number of components. The properties of this approach and comparisons to existing methods are shown using a variety of images, both synthetic and real.",
"fno": "04587475",
"keywords": [
"Computational Complexity",
"Computer Vision",
"Image Segmentation",
"Object Detection",
"Multicompartment Segmentation Framework",
"Homeomorphic Level Sets",
"Computer Vision",
"Object Topology",
"Computational Complexity",
"Level Set",
"Image Segmentation",
"Topology",
"Computer Vision",
"Application Software",
"Deformable Models",
"Computational Complexity",
"Anatomy",
"Biomedical Imaging",
"Geometry"
],
"authors": [
{
"affiliation": "Johns Hopkins University, Baltimore MD 21218, USA",
"fullName": "Xian Fan",
"givenName": null,
"surname": "Xian Fan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Johns Hopkins University, Baltimore MD 21218, USA",
"fullName": "Pierre-Louis Bazin",
"givenName": "Pierre-Louis",
"surname": "Bazin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Johns Hopkins University, Baltimore MD 21218, USA",
"fullName": "Jerry L. Prince",
"givenName": "Jerry L.",
"surname": "Prince",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2008",
"issn": "1063-6919",
"isbn": "978-1-4244-2242-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04587459",
"articleId": "12OmNwswg2x",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04587461",
"articleId": "12OmNxzuMCm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2006/2646/0/26460177",
"title": "Mutual Segmentation with Level Sets",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2006/26460177/12OmNBRsVuF",
"parentPublication": {
"id": "proceedings/cvprw/2006/2646/0",
"title": "2006 Conference on Computer Vision and Pattern Recognition Workshop (CVPRW'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2006/2597/1/259711015",
"title": "Segmentation by Level Sets and Symmetry",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2006/259711015/12OmNBscCZp",
"parentPublication": {
"id": "proceedings/cvpr/2006/2597/2",
"title": "2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206524",
"title": "Multiphase geometric couplings for the segmentation of neural processes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206524/12OmNCh0Pck",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2009/3575/0/3575b012",
"title": "Simultaneous Background/Foreground Segmentation and Contour Smoothing with Level Set Based Partial Differential Equation for Intelligent Surveillance Systems over Network",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2009/3575b012/12OmNqzcvJU",
"parentPublication": {
"id": "proceedings/cisis/2009/3575/0",
"title": "2009 International Conference on Complex, Intelligent and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270254",
"title": "Multiple Class Segmentation Using A Unified Framework over Mean-Shift Patches",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270254/12OmNvA1hpM",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/4/3571d344",
"title": "A Multi-phase Level Set Method for Image Segmentation Based on the Mumford-Shah Model",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571d344/12OmNwJybTd",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/4",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118a296",
"title": "An Exemplar-Based CRF for Multi-instance Object Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118a296/12OmNx8fifu",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/02/ttg2012020202",
"title": "Interactive Image Segmentation Based on Level Sets of Probabilities",
"doi": null,
"abstractUrl": "/journal/tg/2012/02/ttg2012020202/13rRUIJuxpu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2007/06/i0945",
"title": "Curve/Surface Representation and Evolution Using Vector Level Sets with Application to the Shape-Based Segmentation Problem",
"doi": null,
"abstractUrl": "/journal/tp/2007/06/i0945/13rRUxly96C",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrNh0vw",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxWLTqW",
"doi": "10.1109/ICPR.2014.182",
"title": "Multiphase Image Segmentation Using the Deformable Simplicial Complex Method",
"normalizedTitle": "Multiphase Image Segmentation Using the Deformable Simplicial Complex Method",
"abstract": "The deformable simplicial complex method is a generic method for tracking deformable interfaces. It provides explicit interface representation, topological adaptivity, and multiphase support. As such, the deformable simplicial complex method can readily be used for representing active contours in image segmentation based on deformable models. We show the benefits of using the deformable simplicial complex method for image segmentation by segmenting an image into a known number of segments characterized by distinct mean pixel intensities.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The deformable simplicial complex method is a generic method for tracking deformable interfaces. It provides explicit interface representation, topological adaptivity, and multiphase support. As such, the deformable simplicial complex method can readily be used for representing active contours in image segmentation based on deformable models. We show the benefits of using the deformable simplicial complex method for image segmentation by segmenting an image into a known number of segments characterized by distinct mean pixel intensities.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The deformable simplicial complex method is a generic method for tracking deformable interfaces. It provides explicit interface representation, topological adaptivity, and multiphase support. As such, the deformable simplicial complex method can readily be used for representing active contours in image segmentation based on deformable models. We show the benefits of using the deformable simplicial complex method for image segmentation by segmenting an image into a known number of segments characterized by distinct mean pixel intensities.",
"fno": "5209b002",
"keywords": [
"Image Segmentation",
"Level Set",
"Topology",
"Image Edge Detection",
"Junctions",
"Deformable Models",
"Three Dimensional Displays"
],
"authors": [
{
"affiliation": null,
"fullName": "Vedrana Andersen Dahl",
"givenName": "Vedrana Andersen",
"surname": "Dahl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Asger Nyman Christiansen",
"givenName": "Asger Nyman",
"surname": "Christiansen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jakob Andreas Baerentzen",
"givenName": "Jakob Andreas",
"surname": "Baerentzen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "1002-1007",
"year": "2014",
"issn": "1051-4651",
"isbn": "978-1-4799-5209-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5209a996",
"articleId": "12OmNCbCrIB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5209b008",
"articleId": "12OmNButq47",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iscv/1995/7190/0/71900581",
"title": "Vehicle segmentation using deformable templates",
"doi": null,
"abstractUrl": "/proceedings-article/iscv/1995/71900581/12OmNArKSix",
"parentPublication": {
"id": "proceedings/iscv/1995/7190/0",
"title": "Computer Vision, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118a168",
"title": "Segmentation-Aware Deformable Part Models",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118a168/12OmNBSBk2U",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206524",
"title": "Multiphase geometric couplings for the segmentation of neural processes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206524/12OmNCh0Pck",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/1/01315072",
"title": "MetaMorphs: Deformable shape and texture models",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315072/12OmNs5rkZr",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/1",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2011/4483/0/4483a134",
"title": "Cut Locus Construction Using Deformable Simplicial Complexes",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2011/4483a134/12OmNweBUFr",
"parentPublication": {
"id": "proceedings/isvd/2011/4483/0",
"title": "2011 Eighth International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mue/2009/3658/0/3658a041",
"title": "An Innovative Variational Level Set Model for Multiphase Image Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/mue/2009/3658a041/12OmNz3bdCk",
"parentPublication": {
"id": "proceedings/mue/2009/3658/0",
"title": "Multimedia and Ubiquitous Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761726",
"title": "Graph cut based deformable model with statistical shape priors",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761726/12OmNzvz6DN",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1996/03/i0293",
"title": "Vehicle Segmentation and Classification Using Deformable Templates",
"doi": null,
"abstractUrl": "/journal/tp/1996/03/i0293/13rRUwdIOT5",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050852",
"title": "Spring Level Sets: A Deformable Model Representation to Provide Interoperability between Meshes and Level Sets",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050852/13rRUwfZBVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2003/06/i0755",
"title": "A Topology Preserving Level Set Method for Geometric Deformable Models",
"doi": null,
"abstractUrl": "/journal/tp/2003/06/i0755/13rRUyuvRpS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzmclWu",
"title": "Ninth International Conference on Information Visualisation (IV'05)",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB1wkOc",
"doi": "10.1109/IV.2005.46",
"title": "Enhanced SIC (Synergistic Image Creator) for Artistic Use",
"normalizedTitle": "Enhanced SIC (Synergistic Image Creator) for Artistic Use",
"abstract": "We are studying non-photo realistic rendering (NPR) with the goal of developing an image expression tool to create artworks by the NPR technique. The purpose of most NPR researches is to simulate typical painting styles, so they focus on techniques to create brushstrokes. These superficially modified images cannot deviate far from the source photo. If a rendered image is seen from a distance, it is difficult to tell the difference from the source photo. This fact makes us realize that other important elements to add artistic expression to the photographs exist. We supposed that the distribution of bright and dark parts over the painting is essential and proposed the effective way to modify that. Our novel technique is based on knowledge about human optical illusions and basic visual design rules. We integrate this technique into our NPR system, which was used to make the artworks selected by SIGGRAPH 2002 2D art gallery. Furthermore, we present new features that allow artist to select appropriate area for extreme expressions, to create many types of brushstrokes, and to choose more than two types of brushstrokes for a single artwork.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We are studying non-photo realistic rendering (NPR) with the goal of developing an image expression tool to create artworks by the NPR technique. The purpose of most NPR researches is to simulate typical painting styles, so they focus on techniques to create brushstrokes. These superficially modified images cannot deviate far from the source photo. If a rendered image is seen from a distance, it is difficult to tell the difference from the source photo. This fact makes us realize that other important elements to add artistic expression to the photographs exist. We supposed that the distribution of bright and dark parts over the painting is essential and proposed the effective way to modify that. Our novel technique is based on knowledge about human optical illusions and basic visual design rules. We integrate this technique into our NPR system, which was used to make the artworks selected by SIGGRAPH 2002 2D art gallery. Furthermore, we present new features that allow artist to select appropriate area for extreme expressions, to create many types of brushstrokes, and to choose more than two types of brushstrokes for a single artwork.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We are studying non-photo realistic rendering (NPR) with the goal of developing an image expression tool to create artworks by the NPR technique. The purpose of most NPR researches is to simulate typical painting styles, so they focus on techniques to create brushstrokes. These superficially modified images cannot deviate far from the source photo. If a rendered image is seen from a distance, it is difficult to tell the difference from the source photo. This fact makes us realize that other important elements to add artistic expression to the photographs exist. We supposed that the distribution of bright and dark parts over the painting is essential and proposed the effective way to modify that. Our novel technique is based on knowledge about human optical illusions and basic visual design rules. We integrate this technique into our NPR system, which was used to make the artworks selected by SIGGRAPH 2002 2D art gallery. Furthermore, we present new features that allow artist to select appropriate area for extreme expressions, to create many types of brushstrokes, and to choose more than two types of brushstrokes for a single artwork.",
"fno": "23970903",
"keywords": [
"Non Photorealistic Rendering",
"CG"
],
"authors": [
{
"affiliation": "Tokyo Polytechnic University",
"fullName": "Atsushi Kasao",
"givenName": "Atsushi",
"surname": "Kasao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Japan Advanced Institute of Science and Technology",
"fullName": "Kazunori Miyata",
"givenName": "Kazunori",
"surname": "Miyata",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-07-01T00:00:00",
"pubType": "proceedings",
"pages": "903-911",
"year": "2005",
"issn": "1550-6037",
"isbn": "0-7695-2397-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "23970896",
"articleId": "12OmNywfKEC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "23970912",
"articleId": "12OmNBhZ4qO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2004/2140/0/21400293",
"title": "Interactive Point-Based Painterly Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2004/21400293/12OmNApcus6",
"parentPublication": {
"id": "proceedings/cw/2004/2140/0",
"title": "2004 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a475",
"title": "Recent Advances in the User Evaluation Methods and Studies of Non-Photorealistic Visualization and Rendering Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a475/12OmNBQkwZb",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2011/4419/0/4419a176",
"title": "On the Presentation of Byzantine Art in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2011/4419a176/12OmNBU1jQn",
"parentPublication": {
"id": "proceedings/vs-games/2011/4419/0",
"title": "Games and Virtual Worlds for Serious Applications, Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2012/4778/0/4778a007",
"title": "Oriented Animal-mask Decoration Pattern Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2012/4778a007/12OmNx57HG3",
"parentPublication": {
"id": "proceedings/cgiv/2012/4778/0",
"title": "2012 Ninth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ettandgrs/2008/3563/2/3563b743",
"title": "A Method Based on Chinese Landscape Painting Style of Non-photorealistic Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ettandgrs/2008/3563b743/12OmNxdDFOQ",
"parentPublication": {
"id": "ettandgrs/2008/3563/2",
"title": "Education Technology and Training & Geoscience and Remote Sensing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-cg/2005/2473/0/24730537",
"title": "Stylized Glass Paintings for Non-Photorealistic Rendered scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cad-cg/2005/24730537/12OmNyGtjpA",
"parentPublication": {
"id": "proceedings/cad-cg/2005/2473/0",
"title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2004/2178/0/21780215",
"title": "Non-Photorealistic Outdoor Scene Rendering: Techniques and Application",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2004/21780215/12OmNzZmZtZ",
"parentPublication": {
"id": "proceedings/cgiv/2004/2178/0",
"title": "Proceedings. International Conference on Computer Graphics, Imaging and Visualization, 2004. CGIV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2007/01/c1013",
"title": "Stylized Rendering for Anatomic Visualization",
"doi": null,
"abstractUrl": "/magazine/cs/2007/01/c1013/13rRUwcAqmF",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/05/v1041",
"title": "Mixed Media Painting and Portraiture",
"doi": null,
"abstractUrl": "/journal/tg/2007/05/v1041/13rRUyYSWkU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2009/04/mcg2009040081",
"title": "Non-photorealistic Rendering: Unleashing the Artist's Imagination [Graphically Speaking]",
"doi": null,
"abstractUrl": "/magazine/cg/2009/04/mcg2009040081/13rRUzp02qq",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxWcH14",
"title": "2013 Ninth International Conference on Computational Intelligence and Security (CIS)",
"acronym": "cis",
"groupId": "1001517",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBqMDEL",
"doi": "10.1109/CIS.2013.120",
"title": "Wormhole Canvas",
"normalizedTitle": "Wormhole Canvas",
"abstract": "On physical canvas, color can only diffuse in the continuous 2D space. To break this constraint, this paper proposes wormhole canvas, on which the pervasive one-way wormholes can continually teleport color to distant locations. Various results show that different placements of wormholes can produce different stylized renderings. Wormhole canvas shows that NPR(Non-Photo realistic Rendering) can be the artistic media of the future, surpassing the capability of traditional artistic media.",
"abstracts": [
{
"abstractType": "Regular",
"content": "On physical canvas, color can only diffuse in the continuous 2D space. To break this constraint, this paper proposes wormhole canvas, on which the pervasive one-way wormholes can continually teleport color to distant locations. Various results show that different placements of wormholes can produce different stylized renderings. Wormhole canvas shows that NPR(Non-Photo realistic Rendering) can be the artistic media of the future, surpassing the capability of traditional artistic media.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "On physical canvas, color can only diffuse in the continuous 2D space. To break this constraint, this paper proposes wormhole canvas, on which the pervasive one-way wormholes can continually teleport color to distant locations. Various results show that different placements of wormholes can produce different stylized renderings. Wormhole canvas shows that NPR(Non-Photo realistic Rendering) can be the artistic media of the future, surpassing the capability of traditional artistic media.",
"fno": "06746488",
"keywords": [
"Media",
"Rendering Computer Graphics",
"Image Color Analysis",
"Educational Institutions",
"Paints",
"Painting",
"Three Dimensional Displays",
"Artistic Media",
"NPR",
"Paper Modeling",
"Wormhole"
],
"authors": [
{
"affiliation": null,
"fullName": "Ruimin Lyu",
"givenName": "Ruimin",
"surname": "Lyu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yuan Liu",
"givenName": "Yuan",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lei Meng",
"givenName": "Lei",
"surname": "Meng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xinyue Hu",
"givenName": "Xinyue",
"surname": "Hu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "542-544",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2549-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06746487",
"articleId": "12OmNxuo0k4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06746489",
"articleId": "12OmNqGA5kI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icst/2018/5012/0/501201a193",
"title": "Web Canvas Testing Through Visual Inference",
"doi": null,
"abstractUrl": "/proceedings-article/icst/2018/501201a193/12OmNASravE",
"parentPublication": {
"id": "proceedings/icst/2018/5012/0",
"title": "2018 IEEE 11th International Conference on Software Testing, Verification and Validation (ICST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2013/3211/0/3211a790",
"title": "A Tracking Method for 2D Canvas in MR-Based Interactive Painting System",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2013/3211a790/12OmNqFrGCH",
"parentPublication": {
"id": "proceedings/sitis/2013/3211/0",
"title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpp/2013/5117/0/5117a498",
"title": "WormPlanar: Topological Planarization Based Wormhole Detection in Wireless Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpp/2013/5117a498/12OmNrAv3JC",
"parentPublication": {
"id": "proceedings/icpp/2013/5117/0",
"title": "2013 42nd International Conference on Parallel Processing (ICPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097858",
"title": "Real-time and passive wormhole detection for wireless sensor networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097858/12OmNzZWbGB",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nss/2009/3838/0/3838a073",
"title": "DeWorm: A Simple Protocol to Detect Wormhole Attacks in Wireless Ad Hoc Networks",
"doi": null,
"abstractUrl": "/proceedings-article/nss/2009/3838a073/12OmNzdoN45",
"parentPublication": {
"id": "proceedings/nss/2009/3838/0",
"title": "Network and System Security, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07185456",
"title": "Visualization-by-Sketching: An Artist's Interface for Creating Multivariate Time-Varying Data Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07185456/13rRUIIVlcO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/03/mcg2017030070",
"title": "ColorSketch: A Drawing Assistant for Generating Color Sketches from Photos",
"doi": null,
"abstractUrl": "/magazine/cg/2017/03/mcg2017030070/13rRUwkfB20",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2005/12/l1120",
"title": "Solving Vector Consensus with a Wormhole",
"doi": null,
"abstractUrl": "/journal/td/2005/12/l1120/13rRUxBJhuX",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2016/3593/0/07982250",
"title": "Collaborative P2P Painting on a Shared Canvas",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2016/07982250/17D45VN31gW",
"parentPublication": {
"id": "proceedings/cse-euc/2016/3593/0",
"title": "2016 19th IEEE Intl Conference on Computational Science and Engineering (CSE), IEEE 14th Intl Conference on Embedded and Ubiquitous Computing (EUC), and 15th Intl Symposium on Distributed Computing and Applications for Business Engineering (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/5555/01/10109201",
"title": "A Taxonomy of Testable HTML5 Canvas Issues",
"doi": null,
"abstractUrl": "/journal/ts/5555/01/10109201/1METsmoEE9O",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC8dg90",
"title": "2016 IEEE 16th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCeaPUM",
"doi": "10.1109/ICALT.2016.29",
"title": "Best Practices in WebQuest Design: Stimulating the Higher Levels of Bloom's Taxonomy",
"normalizedTitle": "Best Practices in WebQuest Design: Stimulating the Higher Levels of Bloom's Taxonomy",
"abstract": "Various methodologies explore the use of Web resources in educational practices. The WebQuests model, which has been used in educational practices since the nineties, employs these resources in inquiry-oriented lessons in classrooms. The literature emphasizes that WebQuests are more effective when exploring the higher levels of Bloom's Taxonomy (i.e., the capacity to create, evaluate and analyze). However, studies also point out that many WebQuests do not properly explore learning at these levels. This paper proposes a method and prototype for the construction of WebQuests aiming to stimulate the higher levels of Bloom's Taxonomy. This model is based on norm concept from Organizational Semiotics with the objective of proposing best practices to be considered during the WebQuest design. The article presents the evaluation of the model in an empirical study with three lessons performed with fifty-one primary (elementary) school students. The results reveal improved student performance in post lessons tests, as compared with randomly selected control groups. The paper ends with a discussion of the results, limitations, challenges and future research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Various methodologies explore the use of Web resources in educational practices. The WebQuests model, which has been used in educational practices since the nineties, employs these resources in inquiry-oriented lessons in classrooms. The literature emphasizes that WebQuests are more effective when exploring the higher levels of Bloom's Taxonomy (i.e., the capacity to create, evaluate and analyze). However, studies also point out that many WebQuests do not properly explore learning at these levels. This paper proposes a method and prototype for the construction of WebQuests aiming to stimulate the higher levels of Bloom's Taxonomy. This model is based on norm concept from Organizational Semiotics with the objective of proposing best practices to be considered during the WebQuest design. The article presents the evaluation of the model in an empirical study with three lessons performed with fifty-one primary (elementary) school students. The results reveal improved student performance in post lessons tests, as compared with randomly selected control groups. The paper ends with a discussion of the results, limitations, challenges and future research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Various methodologies explore the use of Web resources in educational practices. The WebQuests model, which has been used in educational practices since the nineties, employs these resources in inquiry-oriented lessons in classrooms. The literature emphasizes that WebQuests are more effective when exploring the higher levels of Bloom's Taxonomy (i.e., the capacity to create, evaluate and analyze). However, studies also point out that many WebQuests do not properly explore learning at these levels. This paper proposes a method and prototype for the construction of WebQuests aiming to stimulate the higher levels of Bloom's Taxonomy. This model is based on norm concept from Organizational Semiotics with the objective of proposing best practices to be considered during the WebQuest design. The article presents the evaluation of the model in an empirical study with three lessons performed with fifty-one primary (elementary) school students. The results reveal improved student performance in post lessons tests, as compared with randomly selected control groups. The paper ends with a discussion of the results, limitations, challenges and future research.",
"fno": "9041a391",
"keywords": [
"Computer Aided Instruction",
"Design",
"Educational Institutions",
"Internet",
"Web Quest Design",
"WQ Design",
"Bloom Taxonomy",
"Web Resource",
"Educational Practice",
"Inquiry Oriented Lesson",
"Organizational Semiotics",
"Primary School Student",
"Best Practices",
"Taxonomy",
"Prototypes",
"Semiotics",
"Analytical Models",
"Standards",
"Context",
"Web Quests",
"Blooms Taxonomy",
"Organizational Semiotics",
"Norms"
],
"authors": [
{
"affiliation": "Fac. of Campo Limpo Paulista, Mato Grosso State Univ., Campo Limpo Paulista, Brazil",
"fullName": "Sergio Santos Silva Filho",
"givenName": "Sergio Santos",
"surname": "Silva Filho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fac. of Campo Limpo Paulista, Center for Inf. Technol. Renato Archer(CTI), Campinas, Brazil",
"fullName": "Rodrigo Bonacin",
"givenName": "Rodrigo",
"surname": "Bonacin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-07-01T00:00:00",
"pubType": "proceedings",
"pages": "391-395",
"year": "2016",
"issn": "2161-377X",
"isbn": "978-1-4673-9041-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "9041a386",
"articleId": "12OmNyqzLZE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "9041a396",
"articleId": "12OmNx4gUy2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2015/8454/0/07344084",
"title": "Bloom's taxonomy in software engineering education: A systematic mapping study",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2015/07344084/12OmNBTJIHv",
"parentPublication": {
"id": "proceedings/fie/2015/8454/0",
"title": "2015 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2009/4715/0/05350496",
"title": "Discrete Mathematics assessment using learning objectives based on Bloom's taxonomy",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2009/05350496/12OmNBd9T4k",
"parentPublication": {
"id": "proceedings/fie/2009/4715/0",
"title": "2009 39th IEEE Frontiers in Education Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpc/2008/3176/0/3176a224",
"title": "Checklist Inspections and Modifications: Applying Bloom's Taxonomy to Categorise Developer Comprehension",
"doi": null,
"abstractUrl": "/proceedings-article/icpc/2008/3176a224/12OmNBd9T5e",
"parentPublication": {
"id": "proceedings/icpc/2008/3176/0",
"title": "International Conference on Program Comprehension",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2009/4715/0/05350793",
"title": "Utilizing hands-on learning to facilitate progression through Bloom's taxonomy within the first semester",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2009/05350793/12OmNCctf8i",
"parentPublication": {
"id": "proceedings/fie/2009/4715/0",
"title": "2009 39th IEEE Frontiers in Education Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2013/5261/0/06684978",
"title": "A systems approach to managing learning based on Bloom's revised taxonomy to support student assessment in PBL",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2013/06684978/12OmNCgrD7i",
"parentPublication": {
"id": "proceedings/fie/2013/5261/0",
"title": "2013 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cseet/2009/3539/0/3539a232",
"title": "Evaluating Software Inspection Cognition Levels Using Bloom's Taxonomy",
"doi": null,
"abstractUrl": "/proceedings-article/cseet/2009/3539a232/12OmNyaGeIq",
"parentPublication": {
"id": "proceedings/cseet/2009/3539/0",
"title": "Software Engineering Education and Training, Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2017/5920/0/08190523",
"title": "”I wish I could rank my exam's challenge level!”: An algorithm of Bloom's taxonomy in teaching CS1",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2017/08190523/12OmNzTYBOc",
"parentPublication": {
"id": "proceedings/fie/2017/5920/0",
"title": "2017 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2006/0256/0/04116890",
"title": "Work in Progress - Using Bloom's Taxonomy as a Format for Self-Evaluation of Design Education Activities II",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2006/04116890/12OmNzlD9fc",
"parentPublication": {
"id": "proceedings/fie/2006/0256/0",
"title": "Proceedings. Frontiers in Education. 36th Annual Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a518",
"title": "Educational management in Critical Thinking Training Based on Bloom’s Taxonomy and SOLO Taxonomy",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a518/1tnYhqywf6M",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/seaa/2021/2705/0/270500a138",
"title": "Towards a Taxonomy of Bug Tracking Process Smells: A Quantitative Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/seaa/2021/270500a138/1y2JA90Gtz2",
"parentPublication": {
"id": "proceedings/seaa/2021/2705/0",
"title": "2021 47th Euromicro Conference on Software Engineering and Advanced Applications (SEAA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzIUg01",
"title": "2009 Conference for Visual Media Production",
"acronym": "cvmp",
"groupId": "1003129",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwMob6P",
"doi": "10.1109/CVMP.2009.15",
"title": "Skin-Aware Stylization of Video Portraits",
"normalizedTitle": "Skin-Aware Stylization of Video Portraits",
"abstract": "This paper presents a new non-photorealistic/stroke-based rendering (NPR/SBR) framework for the stylization of videos featuring head shots of people, such as home videos, movies, and camera mobile phone clips. Spatiotemporal skin and edge detection are used to locate and emphasize the semantic content in the stylization process. The SBR portion of the algorithm features novel techniques for motion expression with elliptical brush strokes, brush stroke anchor point distribution, spatio-temporal color-sampling, and brush stroke animation with regard to state-of-the-art issues such as object occlusion and uncovering in the source video. A wide user-accessible parameter space and finishing touches such as cartoon-like edge decoration and other quirky effects empowers a variety of artistic outputs. The resulting stylized sequences are fun and interesting with regard to compression, summarization, motion visualization, story-boarding and art. Both the semantic content, and underlying video motion is highlighted and summarized on every frame of the stylized output sequence.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a new non-photorealistic/stroke-based rendering (NPR/SBR) framework for the stylization of videos featuring head shots of people, such as home videos, movies, and camera mobile phone clips. Spatiotemporal skin and edge detection are used to locate and emphasize the semantic content in the stylization process. The SBR portion of the algorithm features novel techniques for motion expression with elliptical brush strokes, brush stroke anchor point distribution, spatio-temporal color-sampling, and brush stroke animation with regard to state-of-the-art issues such as object occlusion and uncovering in the source video. A wide user-accessible parameter space and finishing touches such as cartoon-like edge decoration and other quirky effects empowers a variety of artistic outputs. The resulting stylized sequences are fun and interesting with regard to compression, summarization, motion visualization, story-boarding and art. Both the semantic content, and underlying video motion is highlighted and summarized on every frame of the stylized output sequence.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a new non-photorealistic/stroke-based rendering (NPR/SBR) framework for the stylization of videos featuring head shots of people, such as home videos, movies, and camera mobile phone clips. Spatiotemporal skin and edge detection are used to locate and emphasize the semantic content in the stylization process. The SBR portion of the algorithm features novel techniques for motion expression with elliptical brush strokes, brush stroke anchor point distribution, spatio-temporal color-sampling, and brush stroke animation with regard to state-of-the-art issues such as object occlusion and uncovering in the source video. A wide user-accessible parameter space and finishing touches such as cartoon-like edge decoration and other quirky effects empowers a variety of artistic outputs. The resulting stylized sequences are fun and interesting with regard to compression, summarization, motion visualization, story-boarding and art. Both the semantic content, and underlying video motion is highlighted and summarized on every frame of the stylized output sequence.",
"fno": "3893a035",
"keywords": [
"Video Signal Processing",
"Probability",
"Motion Analysis",
"Rendering",
"Animation"
],
"authors": [
{
"affiliation": null,
"fullName": "D. O'Regan",
"givenName": "D.",
"surname": "O'Regan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "A.C. Kokaram",
"givenName": "A.C.",
"surname": "Kokaram",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvmp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-11-01T00:00:00",
"pubType": "proceedings",
"pages": "35-44",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3893-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3893a025",
"articleId": "12OmNyKa6fp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3893a045",
"articleId": "12OmNBoNrrq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032b114",
"title": "Coherent Online Video Style Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b114/12OmNB9t6tQ",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/01/ttg2012010058",
"title": "Video Painting Based on a Stabilized Time-Varying Flow Field",
"doi": null,
"abstractUrl": "/journal/tg/2012/01/ttg2012010058/13rRUwInv4m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/01/ttg2011010074",
"title": "Video Painting with Space-Time-Varying Style Parameters",
"doi": null,
"abstractUrl": "/journal/tg/2011/01/ttg2011010074/13rRUxAAT0N",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/01/v0061",
"title": "Stylized and Abstract Painterly Rendering System Using a Multiscale Segmented Sphere Hierarchy",
"doi": null,
"abstractUrl": "/journal/tg/2006/01/v0061/13rRUxBJhFm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050866",
"title": "State of the \"Art”: A Taxonomy of Artistic Stylization Techniques for Images and Video",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050866/13rRUxBa561",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/06/07920337",
"title": "Real-Time Video Stylization Using Object Flows",
"doi": null,
"abstractUrl": "/journal/tg/2018/06/07920337/13rRUxC0SEl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/03/ttg2012030475",
"title": "AniPaint: Interactive Painterly Animation from Video",
"doi": null,
"abstractUrl": "/journal/tg/2012/03/ttg2012030475/13rRUygT7sB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08113507",
"title": "Animated Construction of Chinese Brush Paintings",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08113507/14H4WNjKxTa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300b467",
"title": "Attention-Aware Multi-Stroke Style Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300b467/1gyrAK80fHq",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09547845",
"title": "Exemplar-Based 3D Portrait Stylization",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09547845/1x9TLh9tiow",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNscfI2H",
"title": "2009 Fifth International Conference on IT Security Incident Management and IT Forensics",
"acronym": "imf",
"groupId": "1002902",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyvGyk7",
"doi": "10.1109/IMF.2009.17",
"title": "From the Computer Incident Taxonomy to a Computer Forensic Examination Taxonomy",
"normalizedTitle": "From the Computer Incident Taxonomy to a Computer Forensic Examination Taxonomy",
"abstract": "Forensic investigations are usually conducted to solve crimes committed using IT-systems as perpetrator and/or victim. However, depending on the size of IT-system, also non-malicious incidents can be investigated using the same, methodological and proven techniques. Based upon the principles contained in the well-known Computer Incident Taxonomy [1], this paper proposes the establishment of a common language for the description of computer forensic examinations, both in malicious and non-malicious incidents. Additionally this taxonomy helps performing a forensic examination in establishing answers to a set of well-defined questions during such an examination. The usefulness of the proposed Forensic Examination Taxonomy is shown using a malicious and a non-malicious example.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Forensic investigations are usually conducted to solve crimes committed using IT-systems as perpetrator and/or victim. However, depending on the size of IT-system, also non-malicious incidents can be investigated using the same, methodological and proven techniques. Based upon the principles contained in the well-known Computer Incident Taxonomy [1], this paper proposes the establishment of a common language for the description of computer forensic examinations, both in malicious and non-malicious incidents. Additionally this taxonomy helps performing a forensic examination in establishing answers to a set of well-defined questions during such an examination. The usefulness of the proposed Forensic Examination Taxonomy is shown using a malicious and a non-malicious example.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Forensic investigations are usually conducted to solve crimes committed using IT-systems as perpetrator and/or victim. However, depending on the size of IT-system, also non-malicious incidents can be investigated using the same, methodological and proven techniques. Based upon the principles contained in the well-known Computer Incident Taxonomy [1], this paper proposes the establishment of a common language for the description of computer forensic examinations, both in malicious and non-malicious incidents. Additionally this taxonomy helps performing a forensic examination in establishing answers to a set of well-defined questions during such an examination. The usefulness of the proposed Forensic Examination Taxonomy is shown using a malicious and a non-malicious example.",
"fno": "3807a054",
"keywords": [
"Computer Security",
"Taxonomy",
"IT Forensics"
],
"authors": [
{
"affiliation": null,
"fullName": "Robert Altschaffel",
"givenName": "Robert",
"surname": "Altschaffel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Stefan Kiltz",
"givenName": "Stefan",
"surname": "Kiltz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jana Dittmann",
"givenName": "Jana",
"surname": "Dittmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "imf",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-09-01T00:00:00",
"pubType": "proceedings",
"pages": "54-68",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3807-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3807a041",
"articleId": "12OmNrAv3Xg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3807a069",
"articleId": "12OmNApLGNB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sadfe/2009/3792/0/3792a042",
"title": "Two Models of Digital Forensic Examination",
"doi": null,
"abstractUrl": "/proceedings-article/sadfe/2009/3792a042/12OmNASILI6",
"parentPublication": {
"id": "proceedings/sadfe/2009/3792/0",
"title": "Systematic Approaches to Digital Forensic Engineering, IEEE International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imf/2011/4403/0/4403a092",
"title": "A Common Scheme for Evaluation of Forensic Software",
"doi": null,
"abstractUrl": "/proceedings-article/imf/2011/4403a092/12OmNC4wtHw",
"parentPublication": {
"id": "proceedings/imf/2011/4403/0",
"title": "2011 Sixth International Conference on IT Security Incident Management and IT Forensics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sadfe/2011/4642/0/4642a0003",
"title": "Protecting Digital Data Privacy in Computer Forensic Examination",
"doi": null,
"abstractUrl": "/proceedings-article/sadfe/2011/4642a0003/12OmNrK9q3d",
"parentPublication": {
"id": "proceedings/sadfe/2011/4642/0",
"title": "Systematic Approaches to Digital Forensic Engineering, IEEE International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ngmast/2010/4121/0/4121a025",
"title": "An Analysis of the Digital Forensic Examination of Mobile Phones",
"doi": null,
"abstractUrl": "/proceedings-article/ngmast/2010/4121a025/12OmNrYCXUw",
"parentPublication": {
"id": "proceedings/ngmast/2010/4121/0",
"title": "Next Generation Mobile Applications, Services and Technologies, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictcs/2017/0527/0/0527a144",
"title": "State of the Art in Computer Forensic Education-A Review of Computer Forensic Programmes in the UK, Europe and US",
"doi": null,
"abstractUrl": "/proceedings-article/ictcs/2017/0527a144/12OmNxWLTmM",
"parentPublication": {
"id": "proceedings/ictcs/2017/0527/0",
"title": "2017 International Conference on New Trends in Computing Sciences (ICTCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2016/3207/0/3207a609",
"title": "Understanding Anti-forensic Techniques with Timestamp Manipulation (Invited Paper)",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2016/3207a609/12OmNynJMK8",
"parentPublication": {
"id": "proceedings/iri/2016/3207/0",
"title": "2016 IEEE 17th International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363337",
"title": "Counterfeiting and Defending the Digital Forensic Process",
"doi": null,
"abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363337/12OmNz5JBQY",
"parentPublication": {
"id": "proceedings/cit-iucc-dasc-picom/2015/0154/0",
"title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom-bigdatase-i-spa/2015/7952/1/07345396",
"title": "Android Cache Taxonomy and Forensic Process",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom-bigdatase-i-spa/2015/07345396/12OmNz5JCeo",
"parentPublication": {
"id": "proceedings/trustcom-bigdatase-i-spa/2015/7952/2",
"title": "2015 IEEE Trustcom/BigDataSE/ISPA",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cd/2015/01/mcd2015010014",
"title": "Cloud Attack and Risk Assessment Taxonomy",
"doi": null,
"abstractUrl": "/magazine/cd/2015/01/mcd2015010014/13rRUx0gehn",
"parentPublication": {
"id": "mags/cd",
"title": "IEEE Cloud Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2012/12/mco2012120044",
"title": "SCADA Systems: Challenges for Forensic Investigators",
"doi": null,
"abstractUrl": "/magazine/co/2012/12/mco2012120044/13rRUxC0SHB",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrkjVqA",
"title": "2015 IEEE Scientific Visualization Conference (SciVis)",
"acronym": "scivis",
"groupId": "1811924",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzmLxKh",
"doi": "10.1109/SciVis.2015.7429511",
"title": "A proposed multivariate visualization taxonomy from user data",
"normalizedTitle": "A proposed multivariate visualization taxonomy from user data",
"abstract": "We revisited past user study data on multivariate visualizations, looking at whether image processing measures offer any insight into user performance. While we find statistically significant correlations, some of the greatest insights into user performance came from variables that have strong ties to two key properties of multivariate representations. We discuss our analysis and propose a taxonomy of multivariate visualizations that arises.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We revisited past user study data on multivariate visualizations, looking at whether image processing measures offer any insight into user performance. While we find statistically significant correlations, some of the greatest insights into user performance came from variables that have strong ties to two key properties of multivariate representations. We discuss our analysis and propose a taxonomy of multivariate visualizations that arises.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We revisited past user study data on multivariate visualizations, looking at whether image processing measures offer any insight into user performance. While we find statistically significant correlations, some of the greatest insights into user performance came from variables that have strong ties to two key properties of multivariate representations. We discuss our analysis and propose a taxonomy of multivariate visualizations that arises.",
"fno": "07429511",
"keywords": [
"Data Visualization",
"Taxonomy",
"Image Color Analysis",
"Data Analysis",
"Visualization",
"Correlation",
"User Interfaces Screen Design",
"H 5 2 Information Interfaces And Presentation User Interfaces Evaluation Methodology",
"H 5 2 Information Interfaces And Presentation"
],
"authors": [
{
"affiliation": null,
"fullName": "Mark A. Livingston",
"givenName": "Mark A.",
"surname": "Livingston",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jonathan W. Decker",
"givenName": "Jonathan W.",
"surname": "Decker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhuming Ai",
"givenName": "Zhuming",
"surname": "Ai",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "scivis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-10-01T00:00:00",
"pubType": "proceedings",
"pages": "157-158",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-9785-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07429510",
"articleId": "12OmNBZHijx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07429512",
"articleId": "12OmNzUgcXK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ssdm/1994/6610/0/00336963",
"title": "Comparative multivariate visualization across conceptually different graphic displays",
"doi": null,
"abstractUrl": "/proceedings-article/ssdm/1994/00336963/12OmNAXxX3r",
"parentPublication": {
"id": "proceedings/ssdm/1994/6610/0",
"title": "Seventh International Working Conference on Scientific and Statistical Database Management",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2017/5738/0/08031588",
"title": "Multivariate volumetric data analysis and visualization through bottom-up subspace exploration",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2017/08031588/12OmNAkWvnC",
"parentPublication": {
"id": "proceedings/pacificvis/2017/5738/0",
"title": "2017 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2012/4752/0/06400552",
"title": "Watch this: A taxonomy for dynamic data visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400552/12OmNxYtu2A",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07185456",
"title": "Visualization-by-Sketching: An Artist's Interface for Creating Multivariate Time-Varying Data Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07185456/13rRUIIVlcO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875989",
"title": "The relation between visualization size, grouping, and user performance",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875989/13rRUwcAqqk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061053",
"title": "Visualization of Diversity in Large Multivariate Data Sets",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061053/13rRUwd9CG0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539320",
"title": "Decal-Maps: Real-Time Layering of Decals on Surfaces for Multivariate Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539320/13rRUx0gezV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122310",
"title": "Flexible Linked Axes for Multivariate Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122310/13rRUxD9h54",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2021/3931/0/393100a136",
"title": "On the Visualization of Hierarchical Multivariate Data",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2021/393100a136/1tTtq0XzUHu",
"parentPublication": {
"id": "proceedings/pacificvis/2021/3931/0",
"title": "2021 IEEE 14th Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/06/09556564",
"title": "A Taxonomy-Driven Model for Designing Educational Games in Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2021/06/09556564/1xlw4DK3GXC",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyfdOIW",
"title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBubOX9",
"doi": "10.1109/3DUI.2012.6184187",
"title": "Democratizing rendering for multiple viewers in surround VR systems",
"normalizedTitle": "Democratizing rendering for multiple viewers in surround VR systems",
"abstract": "We present a new approach for how multiple users' views can be rendered in a surround virtual environment without using special multi-view hardware. It is based on the idea that different parts of the screen are often viewed by different users, so that they can be rendered from their own view point, or at least from a point closer to their view point than traditionally expected. The vast majority of 3D virtual reality systems are designed for one head-tracked user, and a number of passive viewers. Only the head tracked user gets to see the correct view of the scene, everybody else sees a distorted image. We reduce this problem by algorithmically democratizing the rendering view point among all tracked users. Researchers have proposed solutions for multiple tracked users, but most of them require major changes to the display hardware of the VR system, such as additional projectors or custom VR glasses. Our approach does not require additional hardware, except the ability to track each participating user. We propose three versions of our multi-viewer algorithm. Each of them balances image distortion and frame rate in different ways, making them more or less suitable for certain application scenarios. Our most sophisticated algorithm renders each pixel from its own, optimized camera perspective, which depends on all tracked users' head positions and orientations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a new approach for how multiple users' views can be rendered in a surround virtual environment without using special multi-view hardware. It is based on the idea that different parts of the screen are often viewed by different users, so that they can be rendered from their own view point, or at least from a point closer to their view point than traditionally expected. The vast majority of 3D virtual reality systems are designed for one head-tracked user, and a number of passive viewers. Only the head tracked user gets to see the correct view of the scene, everybody else sees a distorted image. We reduce this problem by algorithmically democratizing the rendering view point among all tracked users. Researchers have proposed solutions for multiple tracked users, but most of them require major changes to the display hardware of the VR system, such as additional projectors or custom VR glasses. Our approach does not require additional hardware, except the ability to track each participating user. We propose three versions of our multi-viewer algorithm. Each of them balances image distortion and frame rate in different ways, making them more or less suitable for certain application scenarios. Our most sophisticated algorithm renders each pixel from its own, optimized camera perspective, which depends on all tracked users' head positions and orientations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a new approach for how multiple users' views can be rendered in a surround virtual environment without using special multi-view hardware. It is based on the idea that different parts of the screen are often viewed by different users, so that they can be rendered from their own view point, or at least from a point closer to their view point than traditionally expected. The vast majority of 3D virtual reality systems are designed for one head-tracked user, and a number of passive viewers. Only the head tracked user gets to see the correct view of the scene, everybody else sees a distorted image. We reduce this problem by algorithmically democratizing the rendering view point among all tracked users. Researchers have proposed solutions for multiple tracked users, but most of them require major changes to the display hardware of the VR system, such as additional projectors or custom VR glasses. Our approach does not require additional hardware, except the ability to track each participating user. We propose three versions of our multi-viewer algorithm. Each of them balances image distortion and frame rate in different ways, making them more or less suitable for certain application scenarios. Our most sophisticated algorithm renders each pixel from its own, optimized camera perspective, which depends on all tracked users' head positions and orientations.",
"fno": "06184187",
"keywords": [
"Head",
"Heuristic Algorithms",
"Cameras",
"Rendering Computer Graphics",
"Target Tracking",
"Hardware",
"Three Dimensional Displays",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality",
"I 3 3 Computer Graphics Picture Image Generation Display Algorithms"
],
"authors": [
{
"affiliation": "Univ. of California San Diego, La Jolla, CA, USA",
"fullName": "J. P. Schulze",
"givenName": "J. P.",
"surname": "Schulze",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "King Abdullah Univ. of Sci. & Technol., Thuwal, Saudi Arabia",
"fullName": "D. Acevedo",
"givenName": "D.",
"surname": "Acevedo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of California San Diego, La Jolla, CA, USA",
"fullName": "J. Mangan",
"givenName": "J.",
"surname": "Mangan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of California San Diego, La Jolla, CA, USA",
"fullName": "A. Prudhomme",
"givenName": "A.",
"surname": "Prudhomme",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of California San Diego, La Jolla, CA, USA",
"fullName": "P. Nguyen",
"givenName": "P.",
"surname": "Nguyen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of California San Diego, La Jolla, CA, USA",
"fullName": "P. Weber",
"givenName": "P.",
"surname": "Weber",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-03-01T00:00:00",
"pubType": "proceedings",
"pages": "77-80",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-1204-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06184186",
"articleId": "12OmNx5piWT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06184188",
"articleId": "12OmNwF0BUi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2009/3943/0/04810998",
"title": "Image Blending and View Clustering for Multi-Viewer Immersive Projection Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04810998/12OmNCfSqFi",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550220",
"title": "Poster: Head motion transmission based on center of rotation",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550220/12OmNqJ8tlm",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892386",
"title": "Travel in large-scale head-worn VR: Pre-oriented teleportation with WIMs and previews",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892386/12OmNzhELm6",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446247",
"title": "Concept for Rendering Optimizations for Full Human Field of View HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446247/13bd1eY1x3i",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2002/02/v0129",
"title": "A Geometric Comparison of Algorithms for Fusion Control in Stereoscopic HTDs",
"doi": null,
"abstractUrl": "/journal/tg/2002/02/v0129/13rRUwbs2aS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050886",
"title": "Studying the Effects of Stereo, Head Tracking, and Field of Regard on a Small-Scale Spatial Judgment Task",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050886/13rRUwh80uy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/06/07254213",
"title": "A Multi-Task Learning Framework for Head Pose Estimation under Target Motion",
"doi": null,
"abstractUrl": "/journal/tp/2016/06/07254213/13rRUy0HYL3",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09736631",
"title": "On Rotation Gains Within and Beyond Perceptual Limitations for Seated VR",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09736631/1BN1UtLinTi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a404",
"title": "Comparing the Fidelity of Contemporary Pointing with Controller Interactions on Performance of Personal Space Target Selection",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a404/1JrRlimqMKc",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09462341",
"title": "Evaluation of a Low-Cost Virtual Reality Surround-Screen Projection System",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09462341/1uDSAs8QPV6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNASrawz",
"title": "2009 IEEE Virtual Reality Conference",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCfSqFi",
"doi": "10.1109/VR.2009.4810998",
"title": "Image Blending and View Clustering for Multi-Viewer Immersive Projection Environments",
"normalizedTitle": "Image Blending and View Clustering for Multi-Viewer Immersive Projection Environments",
"abstract": "Investment into multi-wall immersive virtual environments is often motivated by the potential for small groups of users to work collaboratively, yet most systems only allow for stereographic rendering from a single viewpoint. This paper discusses approaches for supporting copresent head-tracked users in an immersive projection environment, such as the CAVE, without relying on additional projection and frame-multiplexing technology. The primary technique presented here is called image blending and consists of rendering independent views for each head-tracked user to an off-screen buffer and blending the images into a final composite view using view-vector incidence angles as weighting factors. Additionally, users whose view-vectors intersect a projection screen at similar locations are grouped into a view-cluster. Clustered user views are rendered from the average head position and orientation of all users in that cluster. The clustering approach minimizes users' exposure to undesirable display artifacts such as inverted stereo pairs and nonlinear object projections by distributing projection error over all tracked viewers. These techniques have the added advantage that they can be easily integrated into existing systems with minimally increased hardware and software requirements. We compare image blending and view clustering with previously published techniques and discuss possible implementation optimizations and their tradeoffs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Investment into multi-wall immersive virtual environments is often motivated by the potential for small groups of users to work collaboratively, yet most systems only allow for stereographic rendering from a single viewpoint. This paper discusses approaches for supporting copresent head-tracked users in an immersive projection environment, such as the CAVE, without relying on additional projection and frame-multiplexing technology. The primary technique presented here is called image blending and consists of rendering independent views for each head-tracked user to an off-screen buffer and blending the images into a final composite view using view-vector incidence angles as weighting factors. Additionally, users whose view-vectors intersect a projection screen at similar locations are grouped into a view-cluster. Clustered user views are rendered from the average head position and orientation of all users in that cluster. The clustering approach minimizes users' exposure to undesirable display artifacts such as inverted stereo pairs and nonlinear object projections by distributing projection error over all tracked viewers. These techniques have the added advantage that they can be easily integrated into existing systems with minimally increased hardware and software requirements. We compare image blending and view clustering with previously published techniques and discuss possible implementation optimizations and their tradeoffs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Investment into multi-wall immersive virtual environments is often motivated by the potential for small groups of users to work collaboratively, yet most systems only allow for stereographic rendering from a single viewpoint. This paper discusses approaches for supporting copresent head-tracked users in an immersive projection environment, such as the CAVE, without relying on additional projection and frame-multiplexing technology. The primary technique presented here is called image blending and consists of rendering independent views for each head-tracked user to an off-screen buffer and blending the images into a final composite view using view-vector incidence angles as weighting factors. Additionally, users whose view-vectors intersect a projection screen at similar locations are grouped into a view-cluster. Clustered user views are rendered from the average head position and orientation of all users in that cluster. The clustering approach minimizes users' exposure to undesirable display artifacts such as inverted stereo pairs and nonlinear object projections by distributing projection error over all tracked viewers. These techniques have the added advantage that they can be easily integrated into existing systems with minimally increased hardware and software requirements. We compare image blending and view clustering with previously published techniques and discuss possible implementation optimizations and their tradeoffs.",
"fno": "04810998",
"keywords": [
"Groupware",
"Image Processing",
"Rendering Computer Graphics",
"Virtual Reality",
"Image Blending",
"View Clustering",
"Multiviewer Immersive Projection Environments",
"Multiwall Immersive Virtual Environments",
"Collaborative Work",
"Stereographic Rendering",
"CAVE",
"Frame Multiplexing Technology",
"Rendering Computer Graphics",
"Collaboration",
"Collaborative Work",
"Hardware",
"Virtual Reality",
"Clustering Algorithms",
"Head",
"Polarization",
"Layout",
"Geology",
"Immersive Virtual Reality",
"Multi Viewer Images",
"Collaboration",
"Geometry Shader",
"I 3 3 Picture Image Generation Display Algorithms",
"Viewing Algorithms",
"I 3 7 Three Dimensional Graphics And Realism Virtual Reality"
],
"authors": [
{
"affiliation": "Computer Science Department, University of Colorado at Boulder TerraSpark Geosciences, L. P. marbach@colorado.edu, jon@terraspark.com",
"fullName": "Jonathan Marbach",
"givenName": "Jonathan",
"surname": "Marbach",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-03-01T00:00:00",
"pubType": "proceedings",
"pages": "51-54",
"year": "2009",
"issn": "1087-8270",
"isbn": "978-1-4244-3943-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04810997",
"articleId": "1t2n7JaRIQw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04810999",
"articleId": "12OmNzvhvIR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2010/7029/0/05543463",
"title": "Dynamic projection environments for immersive visualization",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543463/12OmNqI04Pu",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492760",
"title": "Multi-viewpoint images for multi-user interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492760/12OmNvFHfF6",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504727",
"title": "Head mounted projection for enhanced gaze in social interactions",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504727/12OmNwpGgKa",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ems/2016/4971/0/07920244",
"title": "View-dependent Virtual and Augmented Reality for Machine Tools",
"doi": null,
"abstractUrl": "/proceedings-article/ems/2016/07920244/12OmNwt5sgQ",
"parentPublication": {
"id": "proceedings/ems/2016/4971/0",
"title": "2016 European Modelling Symposium (EMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2002/1784/0/17840145",
"title": "Blending Multiple Views",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840145/12OmNzDehbe",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480780",
"title": "Inexpensive Immersive Projection",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480780/12OmNzQR1rP",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446222",
"title": "A Method of View-Dependent Stereoscopic Projection on Curved Screen",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446222/13bd1gCd7Sx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/01/v0026",
"title": "Usability of Multiviewpoint Images for Spatial Interaction in Projection-Based Display Systems",
"doi": null,
"abstractUrl": "/journal/tg/2007/01/v0026/13rRUxNEqPG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811057",
"title": "Immersive Rear Projection on Curved Screens",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811057/1lssAh0wwUg",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyGbI4V",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs0TKW6",
"doi": "10.1109/ICME.2016.7552858",
"title": "Depth augmented stereo panorama for cinematic virtual reality with head-motion parallax",
"normalizedTitle": "Depth augmented stereo panorama for cinematic virtual reality with head-motion parallax",
"abstract": "Cinematic virtual reality (VR) aims to provide immersive visual experiences of real-world scenes on head-mounted displays. Current cinematic VR systems employ omnidirectional stereo videos from a fixed position, and therefore do not address head-motion parallax, which is an important cue for depth perception. We propose a new 3D video representation, referred to as depth augmented stereo panorama (DASP), to address this issue. DASP is developed considering data capture, postproduction, streaming, and rendering stages of the VR pipeline. The capabilities of this representation are evaluated by comparing the generated viewports with those from known 3D models. Results indicate that DASP can successfully create stereo and induce head-motion parallax in a predefined operating range.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Cinematic virtual reality (VR) aims to provide immersive visual experiences of real-world scenes on head-mounted displays. Current cinematic VR systems employ omnidirectional stereo videos from a fixed position, and therefore do not address head-motion parallax, which is an important cue for depth perception. We propose a new 3D video representation, referred to as depth augmented stereo panorama (DASP), to address this issue. DASP is developed considering data capture, postproduction, streaming, and rendering stages of the VR pipeline. The capabilities of this representation are evaluated by comparing the generated viewports with those from known 3D models. Results indicate that DASP can successfully create stereo and induce head-motion parallax in a predefined operating range.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Cinematic virtual reality (VR) aims to provide immersive visual experiences of real-world scenes on head-mounted displays. Current cinematic VR systems employ omnidirectional stereo videos from a fixed position, and therefore do not address head-motion parallax, which is an important cue for depth perception. We propose a new 3D video representation, referred to as depth augmented stereo panorama (DASP), to address this issue. DASP is developed considering data capture, postproduction, streaming, and rendering stages of the VR pipeline. The capabilities of this representation are evaluated by comparing the generated viewports with those from known 3D models. Results indicate that DASP can successfully create stereo and induce head-motion parallax in a predefined operating range.",
"fno": "07552858",
"keywords": [
"Three Dimensional Displays",
"Videos",
"Head",
"Rendering Computer Graphics",
"Solid Modeling",
"Cameras",
"Pipelines",
"Head Motion Parallax",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Department of Electrical Engineering, Stanford University",
"fullName": "Jayant Thatte",
"givenName": "Jayant",
"surname": "Thatte",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Electrical Engineering, Stanford University",
"fullName": "Jean-Baptiste Boin",
"givenName": "Jean-Baptiste",
"surname": "Boin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Electrical Engineering, Stanford University",
"fullName": "Haricharan Lakshman",
"givenName": "Haricharan",
"surname": "Lakshman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Electrical Engineering, Stanford University",
"fullName": "Bernd Girod",
"givenName": "Bernd",
"surname": "Girod",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2016",
"issn": "1945-788X",
"isbn": "978-1-4673-7258-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07552856",
"articleId": "12OmNy2rS1s",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07552859",
"articleId": "12OmNwK7o8C",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismarw/2016/3740/0/07836475",
"title": "Enhancing Immersive Cinematic Experience with Augmented Virtuality",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836475/12OmNCm7BFH",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2008/3381/0/04741389",
"title": "Runtime Baseline Adjustment for Stereo Panorama",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2008/04741389/12OmNwwd2OM",
"parentPublication": {
"id": "proceedings/cw/2008/3381/0",
"title": "2008 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260916",
"title": "Parallax360: Stereoscopic 360° Scene Representation for Head-Motion Parallax",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260916/13rRUyp7tX1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08661657",
"title": "Motion parallax for 360° RGBD video",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08661657/18bmQqdj3Nu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798189",
"title": "Interaction Techniques for Cinematic Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798189/1cJ0GcCwwO4",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300m2242",
"title": "Learning Parallax Attention for Stereo Image Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300m2242/1gyrhiM5ZvO",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090650",
"title": "Depth Augmented Omnidirectional Stereo for 6-DoF VR Photography",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090650/1jIxi5ANPS8",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090635",
"title": "Potential Effects of Dynamic Parallax on Eyesight in Virtual Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090635/1jIxu8voDlu",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/04/09435993",
"title": "Real-World Virtual Reality With Head-Motion Parallax",
"doi": null,
"abstractUrl": "/magazine/cg/2021/04/09435993/1tJsoJKysqk",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09664291",
"title": "EHTask: Recognizing User Tasks From Eye and Head Movements in Immersive Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09664291/1zHDIPIlNBe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrNh0uF",
"title": "2012 Ninth Conference on Computer and Robot Vision",
"acronym": "crv",
"groupId": "1001794",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwE9ORM",
"doi": "10.1109/CRV.2012.24",
"title": "Coarse Head Pose Estimation using Image Abstraction",
"normalizedTitle": "Coarse Head Pose Estimation using Image Abstraction",
"abstract": "We present an algorithm to estimate the pose of a human head from a single image. It builds on the fact that only a limited set of cues are required to estimate human head pose and that most images contain far too many details than what are required for this task. Thus, non-photorealistic rendering is first used to eliminate irrelevant details from the picture and accentuate facial features critical to estimating head pose. The maximum likelihood pose range is then estimated by training a classifier on scaled down abstracted images. This algorithm covers a wide range of head orientations, can be used at various image resolutions, does not need personalized initialization, and is also relatively insensitive to illumination. Moreover, the facts that it performs competitively when compared with other state of the art methods and that it is fast enough to be used in real time systems make it a promising method for coarse head pose estimation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an algorithm to estimate the pose of a human head from a single image. It builds on the fact that only a limited set of cues are required to estimate human head pose and that most images contain far too many details than what are required for this task. Thus, non-photorealistic rendering is first used to eliminate irrelevant details from the picture and accentuate facial features critical to estimating head pose. The maximum likelihood pose range is then estimated by training a classifier on scaled down abstracted images. This algorithm covers a wide range of head orientations, can be used at various image resolutions, does not need personalized initialization, and is also relatively insensitive to illumination. Moreover, the facts that it performs competitively when compared with other state of the art methods and that it is fast enough to be used in real time systems make it a promising method for coarse head pose estimation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an algorithm to estimate the pose of a human head from a single image. It builds on the fact that only a limited set of cues are required to estimate human head pose and that most images contain far too many details than what are required for this task. Thus, non-photorealistic rendering is first used to eliminate irrelevant details from the picture and accentuate facial features critical to estimating head pose. The maximum likelihood pose range is then estimated by training a classifier on scaled down abstracted images. This algorithm covers a wide range of head orientations, can be used at various image resolutions, does not need personalized initialization, and is also relatively insensitive to illumination. Moreover, the facts that it performs competitively when compared with other state of the art methods and that it is fast enough to be used in real time systems make it a promising method for coarse head pose estimation.",
"fno": "4683a125",
"keywords": [
"Rendering Computer Graphics",
"Face Recognition",
"Image Classification",
"Image Resolution",
"Maximum Likelihood Estimation",
"Pose Estimation",
"Real Time System",
"Coarse Head Pose Estimation",
"Image Abstraction",
"Human Head Pose Estimation",
"Nonphotorealistic Rendering",
"Facial Feature",
"Maximum Likelihood Pose Range Estimation",
"Classifier",
"Head Orientation",
"Image Resolution",
"Head",
"Image Segmentation",
"Magnetic Heads",
"Rendering Computer Graphics",
"Estimation",
"Training",
"Image Edge Detection",
"Non Photorealistic Rendering",
"Head Pose"
],
"authors": [
{
"affiliation": "Samsung India Software Oper., Bangalore, India",
"fullName": "H. Kannan",
"givenName": "H.",
"surname": "Kannan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Inst. of Technol., Delhi, India",
"fullName": "P. Kalra",
"givenName": "P.",
"surname": "Kalra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Inst. of Technol., Delhi, India",
"fullName": "A. V. Puri",
"givenName": "A. V.",
"surname": "Puri",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "crv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-05-01T00:00:00",
"pubType": "proceedings",
"pages": "125-130",
"year": "2012",
"issn": null,
"isbn": "978-0-7695-4683-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4683a117",
"articleId": "12OmNy68ECu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4683a131",
"articleId": "12OmNwdbV3v",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/motion/2002/1860/0/18600125",
"title": "Comparative Study of Coarse Head Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/motion/2002/18600125/12OmNAGw13Q",
"parentPublication": {
"id": "proceedings/motion/2002/1860/0",
"title": "Proceedings Workshop on Motion and Video Computing (MOTION 2002)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a794",
"title": "3D Head Pose Estimation Based on Scene Flow and Generic Head Model",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a794/12OmNqGitTB",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2008/3454/0/3454a592",
"title": "Supervised Learning for Head Pose Estimation Using SVD and Gabor Wavelets",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2008/3454a592/12OmNvs4vpg",
"parentPublication": {
"id": "proceedings/ism/2008/3454/0",
"title": "2008 Tenth IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d870",
"title": "Visual Gaze Estimation by Joint Head and Eye Information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d870/12OmNyRg4Cq",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/amfg/2003/2010/0/20100092",
"title": "Absolute Head Pose Estimation From Overhead Wide-Angle Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/amfg/2003/20100092/12OmNyen1y9",
"parentPublication": {
"id": "proceedings/amfg/2003/2010/0",
"title": "2003 IEEE International Workshop on Analysis and Modeling of Faces and Gestures",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2021/3176/0/09666992",
"title": "Relative Pose Consistency for Semi-Supervised Head Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09666992/1A6BGyUQ4yk",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500d341",
"title": "HHP-Net: A light Heteroscedastic neural network for Head Pose estimation with uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500d341/1B12GVZhWCY",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/insai/2021/0859/0/085900a244",
"title": "Head Pose Estimation of Stroke Patients Based on Depth Residual Network",
"doi": null,
"abstractUrl": "/proceedings-article/insai/2021/085900a244/1CHx1ui30ZO",
"parentPublication": {
"id": "proceedings/insai/2021/0859/0",
"title": "2021 International Conference on Networking Systems of AI (INSAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700a787",
"title": "A survey of head pose estimation methods",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700a787/1pVHmSe7k7m",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0",
"title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a109",
"title": "Comparing Head and AR Glasses Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a109/1yeQMONGc9y",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAYoKmw",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNylbovt",
"doi": "10.1109/ISMAR.2013.6671800",
"title": "Subtle cueing for visual search in head-tracked head worn displays",
"normalizedTitle": "Subtle cueing for visual search in head-tracked head worn displays",
"abstract": "Goal-oriented visual search in augmented reality can be facilitated by using visual cues to call attention to a target. However, traditional use of explicit cues can degrade visual search performance due to scene distortion, occlusion and addition of visual clutter. In contrast, Subtle Cueing has been previously proposed as an alter-native to explicit cueing, but little is known about how well it works for head-tracked head worn displays (HWDs). We investigated the effect of Subtle Cueing for head-tracked head worn displays, using visual search research methods in simulated augmented reality environments. Our user study found that Subtle Cueing improves visual search performance, and serves as a feasible cueing mechanism for AR environments using HWDs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Goal-oriented visual search in augmented reality can be facilitated by using visual cues to call attention to a target. However, traditional use of explicit cues can degrade visual search performance due to scene distortion, occlusion and addition of visual clutter. In contrast, Subtle Cueing has been previously proposed as an alter-native to explicit cueing, but little is known about how well it works for head-tracked head worn displays (HWDs). We investigated the effect of Subtle Cueing for head-tracked head worn displays, using visual search research methods in simulated augmented reality environments. Our user study found that Subtle Cueing improves visual search performance, and serves as a feasible cueing mechanism for AR environments using HWDs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Goal-oriented visual search in augmented reality can be facilitated by using visual cues to call attention to a target. However, traditional use of explicit cues can degrade visual search performance due to scene distortion, occlusion and addition of visual clutter. In contrast, Subtle Cueing has been previously proposed as an alter-native to explicit cueing, but little is known about how well it works for head-tracked head worn displays (HWDs). We investigated the effect of Subtle Cueing for head-tracked head worn displays, using visual search research methods in simulated augmented reality environments. Our user study found that Subtle Cueing improves visual search performance, and serves as a feasible cueing mechanism for AR environments using HWDs.",
"fno": "06671800",
"keywords": [
"Visualization",
"Augmented Reality",
"Head",
"Magnetic Heads",
"Clutter",
"Erbium",
"Mobile Communication",
"Visual Search",
"Attention",
"Subtle Visual Cueing"
],
"authors": [
{
"affiliation": "Dept. of Electr. & Comput. Eng., Nat. Univ. of Singapore, Singapore, Singapore",
"fullName": "Weiquan Lu",
"givenName": null,
"surname": "Weiquan Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Optoelectron., Beijing Inst. of Technol., Beijing, China",
"fullName": "Dan Feng",
"givenName": null,
"surname": "Dan Feng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Columbia Univ., New York, NY, USA",
"fullName": "Steven Feiner",
"givenName": "Steven",
"surname": "Feiner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. & Comput. Eng., Nat. Univ. of Singapore, Singapore, Singapore",
"fullName": "Qi Zhao",
"givenName": null,
"surname": "Qi Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. & Comput. Eng., Nat. Univ. of Singapore, Singapore, Singapore",
"fullName": "Henry Been-Lim Duh",
"givenName": "Henry Been-Lim",
"surname": "Duh",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-10-01T00:00:00",
"pubType": "proceedings",
"pages": "271-272",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2869-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06671799",
"articleId": "12OmNz61dzi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06671801",
"articleId": "12OmNBrV1LR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2017/6716/0/07893320",
"title": "Comparing leaning-based motion cueing interfaces for virtual reality locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2017/07893320/12OmNqIhFMx",
"parentPublication": {
"id": "proceedings/3dui/2017/6716/0",
"title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402553",
"title": "Subtle cueing for visual search in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402553/12OmNqOffz7",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771384",
"title": "Realistic head motion synthesis for an image-based talking head",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771384/12OmNviZlz1",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771401",
"title": "Realistic head motion synthesis for an image-based talking head",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771401/12OmNzVoBzX",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/10/07346504",
"title": "Social Grouping for Multi-Target Tracking and Head Pose Estimation in Video",
"doi": null,
"abstractUrl": "/journal/tp/2016/10/07346504/13rRUxly9fd",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/03/ttg2014030404",
"title": "Attributes of Subtle Cues for Facilitating Visual Search in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2014/03/ttg2014030404/13rRUyfbwqJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/04/08883081",
"title": "Forecasting People Trajectories and Head Poses by Jointly Reasoning on Tracklets and Vislets",
"doi": null,
"abstractUrl": "/journal/tp/2021/04/08883081/1epRQ82Spnq",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089433",
"title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089433/1jIxf3ZEs0w",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090631",
"title": "Framing the Scene: An Examination of Augmented Reality Head Worn Displays in Construction Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090631/1jIxyGx0KXK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09664291",
"title": "EHTask: Recognizing User Tasks From Eye and Head Movements in Immersive Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09664291/1zHDIPIlNBe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJewzlI3CM",
"doi": "10.1109/VRW55335.2022.00171",
"title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"normalizedTitle": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"abstract": "The optical see-through (OST) head-mounted display (HMD) is a typical platform for Augmented Reality (AR) and allows users to experience virtual augmentations in a wearable form factor. Utilizing information of the real-world background, visualization algorithms adapt the layout and representation of content to improve legibility. Typically, this background information is captured via built-in HMD cameras. However, HMD camera views of the real-world scene are distinctively different to the user's view through the OST display. In this work, we propose eye-perspective rendering (EPR) as a solution to synthesize high fidelity renderings of the user's view for mobile OST HMD to enable adaptation algorithms to utilize visual information as seen from the perspective of the user to improve placement, rendering and, thus, legibility of content.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The optical see-through (OST) head-mounted display (HMD) is a typical platform for Augmented Reality (AR) and allows users to experience virtual augmentations in a wearable form factor. Utilizing information of the real-world background, visualization algorithms adapt the layout and representation of content to improve legibility. Typically, this background information is captured via built-in HMD cameras. However, HMD camera views of the real-world scene are distinctively different to the user's view through the OST display. In this work, we propose eye-perspective rendering (EPR) as a solution to synthesize high fidelity renderings of the user's view for mobile OST HMD to enable adaptation algorithms to utilize visual information as seen from the perspective of the user to improve placement, rendering and, thus, legibility of content.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The optical see-through (OST) head-mounted display (HMD) is a typical platform for Augmented Reality (AR) and allows users to experience virtual augmentations in a wearable form factor. Utilizing information of the real-world background, visualization algorithms adapt the layout and representation of content to improve legibility. Typically, this background information is captured via built-in HMD cameras. However, HMD camera views of the real-world scene are distinctively different to the user's view through the OST display. In this work, we propose eye-perspective rendering (EPR) as a solution to synthesize high fidelity renderings of the user's view for mobile OST HMD to enable adaptation algorithms to utilize visual information as seen from the perspective of the user to improve placement, rendering and, thus, legibility of content.",
"fno": "840200a640",
"keywords": [
"Augmented Reality",
"Data Visualisation",
"Helmet Mounted Displays",
"Rendering Computer Graphics",
"Virtual Reality",
"Mobile OST HMD",
"Adaptation Algorithms",
"Visual Information",
"Placement Rendering",
"Legibility",
"Towards Eye Perspective Rendering",
"Typical Platform",
"Augmented Reality",
"Virtual Augmentations",
"Wearable Form Factor",
"Utilizing Information",
"Real World Background",
"Visualization Algorithms",
"Background Information",
"HMD Cameras",
"HMD Camera Views",
"Real World Scene",
"OST Display",
"High Fidelity Renderings",
"Visualization",
"Head Mounted Displays",
"Three Dimensional Displays",
"Conferences",
"Resists",
"User Interfaces",
"Rendering Computer Graphics"
],
"authors": [
{
"affiliation": "Salzburg University of Applied Sciences",
"fullName": "Gerlinde Emsenhuber",
"givenName": "Gerlinde",
"surname": "Emsenhuber",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Salzburg University of Applied Sciences",
"fullName": "Michael Domhardt",
"givenName": "Michael",
"surname": "Domhardt",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Otago",
"fullName": "Tobias Langlotz",
"givenName": "Tobias",
"surname": "Langlotz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graz University of Technology",
"fullName": "Denis Kalkofen",
"givenName": "Denis",
"surname": "Kalkofen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Salzburg University of Applied Sciences",
"fullName": "Markus Tatzgern",
"givenName": "Markus",
"surname": "Tatzgern",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "640-641",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a638",
"articleId": "1CJdbs0tbsQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a642",
"articleId": "1CJf84U99M4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504717",
"title": "OST Rift: Temporally consistent augmented reality with a consumer optical see-through head-mounted display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504717/12OmNzXFoKD",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011070888",
"title": "Natural Perspective Projections for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a470",
"title": "Perceptibility of Jitter in Augmented Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a470/1JrQZ2SKCuQ",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090625",
"title": "Automatic Calibration of Commercial Optical See-Through Head-Mounted Displays for Medical Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090625/1jIxwp2g0VO",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a649",
"title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a649/1pysvKFdazS",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a413",
"title": "Selective Foveated Ray Tracing for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1hITKdHGg",
"doi": "10.1109/CVPR52688.2022.01973",
"title": "HeadNeRF: A Realtime NeRF-based Parametric Head Model",
"normalizedTitle": "HeadNeRF: A Realtime NeRF-based Parametric Head Model",
"abstract": "In this paper, we propose HeadNeRF, a novel NeRF-based parametric head model that integrates the neural radiance field to the parametric representation of the human head. It can render high fidelity head images in real-time on modern GPUs, and supports directly controlling the generated images' rendering pose and various semantic attributes. Different from existing related parametric models, we use the neural radiance fields as a novel 3D proxy instead of the traditional 3D textured mesh, which makes that HeadNeRF is able to generate high fidelity images. However, the computationally expensive rendering process of the original NeRF hinders the construction of the parametric NeRF model. To address this issue, we adopt the strategy of integrating 2D neural rendering to the rendering process of NeRF and design novel loss terms. As a result, the rendering speed of HeadNeRF can be significantly accelerated, and the rendering time of one frame is reduced from 5s to 25ms. The well designed loss terms also improve the rendering accuracy, and the fine-level details of the human head, such as the gaps between teeth, wrinkles, and beards, can be represented and synthesized by HeadNeRF. Extensive experimental results and several applications demonstrate its effectiveness. The trained parametric model is available at https://github.com/CrisHY1995/headnerf.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose HeadNeRF, a novel NeRF-based parametric head model that integrates the neural radiance field to the parametric representation of the human head. It can render high fidelity head images in real-time on modern GPUs, and supports directly controlling the generated images' rendering pose and various semantic attributes. Different from existing related parametric models, we use the neural radiance fields as a novel 3D proxy instead of the traditional 3D textured mesh, which makes that HeadNeRF is able to generate high fidelity images. However, the computationally expensive rendering process of the original NeRF hinders the construction of the parametric NeRF model. To address this issue, we adopt the strategy of integrating 2D neural rendering to the rendering process of NeRF and design novel loss terms. As a result, the rendering speed of HeadNeRF can be significantly accelerated, and the rendering time of one frame is reduced from 5s to 25ms. The well designed loss terms also improve the rendering accuracy, and the fine-level details of the human head, such as the gaps between teeth, wrinkles, and beards, can be represented and synthesized by HeadNeRF. Extensive experimental results and several applications demonstrate its effectiveness. The trained parametric model is available at https://github.com/CrisHY1995/headnerf.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose HeadNeRF, a novel NeRF-based parametric head model that integrates the neural radiance field to the parametric representation of the human head. It can render high fidelity head images in real-time on modern GPUs, and supports directly controlling the generated images' rendering pose and various semantic attributes. Different from existing related parametric models, we use the neural radiance fields as a novel 3D proxy instead of the traditional 3D textured mesh, which makes that HeadNeRF is able to generate high fidelity images. However, the computationally expensive rendering process of the original NeRF hinders the construction of the parametric NeRF model. To address this issue, we adopt the strategy of integrating 2D neural rendering to the rendering process of NeRF and design novel loss terms. As a result, the rendering speed of HeadNeRF can be significantly accelerated, and the rendering time of one frame is reduced from 5s to 25ms. The well designed loss terms also improve the rendering accuracy, and the fine-level details of the human head, such as the gaps between teeth, wrinkles, and beards, can be represented and synthesized by HeadNeRF. Extensive experimental results and several applications demonstrate its effectiveness. The trained parametric model is available at https://github.com/CrisHY1995/headnerf.",
"fno": "694600u0342",
"keywords": [
"Coprocessors",
"Image Texture",
"Rendering Computer Graphics",
"Solid Modelling",
"Realtime Ne RF Based Parametric Head Model",
"Neural Radiance Field",
"Parametric Representation",
"Human Head",
"High Fidelity Head Images",
"Generated Images",
"Parametric Models",
"3 D Textured Mesh",
"High Fidelity Images",
"Rendering Speed",
"Rendering Time",
"Loss Terms",
"Rendering Accuracy",
"Trained Parametric Model",
"2 D Neural",
"Ethics",
"Computer Vision",
"Head",
"Three Dimensional Displays",
"Semantics",
"Rendering Computer Graphics",
"Real Time Systems"
],
"authors": [
{
"affiliation": "University of Science and Technology of China",
"fullName": "Yang Hong",
"givenName": "Yang",
"surname": "Hong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China",
"fullName": "Bo Peng",
"givenName": "Bo",
"surname": "Peng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China",
"fullName": "Haiyao Xiao",
"givenName": "Haiyao",
"surname": "Xiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China",
"fullName": "Ligang Liu",
"givenName": "Ligang",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China",
"fullName": "Juyong Zhang",
"givenName": "Juyong",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "20342-20352",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "694600u0332",
"articleId": "1H1mHD2RrtS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600u0353",
"articleId": "1H1m5zBUGNa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2019/05/08643434",
"title": "SGaze: A Data-Driven Eye-Head Coordination Model for Realtime Gaze Prediction",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08643434/18K0lRIKi7m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f764",
"title": "AD-NeRF: Audio Driven Neural Radiance Fields for Talking Head Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f764/1BmKdD1ODqU",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09888037",
"title": "MPS-NeRF: Generalizable 3D Human Rendering From Multiview Images",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09888037/1GBRkqcf7m8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09872532",
"title": "FoV-NeRF: Foveated Neural Radiance Fields for Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09872532/1GhRVOIAS4g",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8332",
"title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600i238",
"title": "Block-NeRF: Scalable Large Scene Neural View Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600i238/1H1hVQ0jgBy",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600q6169",
"title": "NeRF in the Dark: High Dynamic Range View Synthesis from Noisy Raw Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600q6169/1H1isrcMxLW",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f428",
"title": "Point-NeRF: Point-based Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09909994",
"title": "Recursive-NeRF: An Efficient and Dynamically Growing NeRF",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09909994/1Hcj8wIB6s8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600f755",
"title": "X-NeRF: Explicit Neural Radiance Field for Multi-Scene 360<sup>°</sup> Insufficient RGB-D Views",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600f755/1KxV7reNb6E",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxwp2g0VO",
"doi": "10.1109/VRW50115.2020.00228",
"title": "Automatic Calibration of Commercial Optical See-Through Head-Mounted Displays for Medical Applications",
"normalizedTitle": "Automatic Calibration of Commercial Optical See-Through Head-Mounted Displays for Medical Applications",
"abstract": "The simplified, manual calibration of commercial Optical See-Through Head-Mounted Displays (OST-HMDs) is neither accurate nor convenient for medical applications. An interaction-free calibration method that can be easily implemented in commercial headsets is thus desired. State-of-the-art automatic calibrations simplify the eye-screen system as a pinhole camera and tedious offline calibrations are required. Furthermore, they have never been tested on original commercial products. We present a gaze-based automatic calibration method that can be easily implemented in commercial headsets without knowing hardware details. The location of the virtual target is revised in world coordinate according to the real-time tracked eye gaze. The algorithm has been tested with the Microsoft HoloLens. Current quantitative and qualitative user studies show that the automatically calibrated display is statistically comparable with the manually calibrated display under both monocular and binocular rendering mode. Since it is cumbersome to ask users to perform manual calibrations every time the HMD is re-positioned, our method provides a comparably accurate but more convenient and practical solution to the HMD calibration.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The simplified, manual calibration of commercial Optical See-Through Head-Mounted Displays (OST-HMDs) is neither accurate nor convenient for medical applications. An interaction-free calibration method that can be easily implemented in commercial headsets is thus desired. State-of-the-art automatic calibrations simplify the eye-screen system as a pinhole camera and tedious offline calibrations are required. Furthermore, they have never been tested on original commercial products. We present a gaze-based automatic calibration method that can be easily implemented in commercial headsets without knowing hardware details. The location of the virtual target is revised in world coordinate according to the real-time tracked eye gaze. The algorithm has been tested with the Microsoft HoloLens. Current quantitative and qualitative user studies show that the automatically calibrated display is statistically comparable with the manually calibrated display under both monocular and binocular rendering mode. Since it is cumbersome to ask users to perform manual calibrations every time the HMD is re-positioned, our method provides a comparably accurate but more convenient and practical solution to the HMD calibration.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The simplified, manual calibration of commercial Optical See-Through Head-Mounted Displays (OST-HMDs) is neither accurate nor convenient for medical applications. An interaction-free calibration method that can be easily implemented in commercial headsets is thus desired. State-of-the-art automatic calibrations simplify the eye-screen system as a pinhole camera and tedious offline calibrations are required. Furthermore, they have never been tested on original commercial products. We present a gaze-based automatic calibration method that can be easily implemented in commercial headsets without knowing hardware details. The location of the virtual target is revised in world coordinate according to the real-time tracked eye gaze. The algorithm has been tested with the Microsoft HoloLens. Current quantitative and qualitative user studies show that the automatically calibrated display is statistically comparable with the manually calibrated display under both monocular and binocular rendering mode. Since it is cumbersome to ask users to perform manual calibrations every time the HMD is re-positioned, our method provides a comparably accurate but more convenient and practical solution to the HMD calibration.",
"fno": "09090625",
"keywords": [
"Calibration",
"Cameras",
"Three Dimensional Displays",
"Manuals",
"Head Mounted Displays",
"Headphones",
"Rendering Computer Graphics"
],
"authors": [
{
"affiliation": "Imperial College London,Mechatronics in Medicine Laboratory",
"fullName": "Xue Hu",
"givenName": "Xue",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Pisa,Department of Information Engineering",
"fullName": "Fabrizio Cutolo",
"givenName": "Fabrizio",
"surname": "Cutolo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College London,Mechatronics in Medicine Laboratory",
"fullName": "Fabio Tatti",
"givenName": "Fabio",
"surname": "Tatti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College London,Mechatronics in Medicine Laboratory",
"fullName": "Ferdinando Rodriguez y Baena",
"givenName": "Ferdinando Rodriguez y",
"surname": "Baena",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "754-755",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090639",
"articleId": "1jIxvN0ibpS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090450",
"articleId": "1jIxyCwDK9i",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504693",
"title": "A calibration method for optical see-through head-mounted displays with a depth camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504693/12OmNAnMuMd",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a043",
"title": "Simultaneous Direct and Augmented View Distortion Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a043/12OmNC1oT64",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798846",
"title": "Interaction-free calibration for optical see-through head-mounted displays based on 3D Eye localization",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798846/12OmNCdBDWL",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446429",
"title": "Impact of Alignment Point Distance Distribution on SPAAM Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446429/13bd1gCd7Sz",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011070888",
"title": "Natural Perspective Projections for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07021939",
"title": "Subjective Evaluation of a Semi-Automatic Optical See-Through Head-Mounted Display Calibration Technique",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07021939/13rRUwInvyB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064856",
"title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a640",
"title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09253561",
"title": "AR-Loupe: Magnified Augmented Reality by Combining an Optical See-Through Head-Mounted Display and a Loupe",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09253561/1oDXHeBJHNe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxE2mWh",
"title": "2013 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwudQMb",
"doi": "10.1109/VR.2013.6549334",
"title": "Message from the Program Chairs",
"normalizedTitle": "Message from the Program Chairs",
"abstract": "The IEEE Virtual Reality (VR) 2013 full papers program, contained herein, includes 21 papers that present research, applications, and systems in the field of virtual reality. They were selected from 97 full paper submissions by an international program committee of 64 members, supported by 225 external expert reviewers, leading to an acceptance rate for IEEE Virtual Reality 2013 of 21.6%. All papers appearing in this issue have undergone a two-round review process. In the first round review, at least four expert reviewers reviewed the work. The paper chairs selected the primary and secondary reviewers from the international program committee, and the primary reviewer then recruited at least two external experts. After completion of all reviews, the primary reviewer led an online discussion phase, which resulted in an initial recommendation for acceptance or rejection and a set of modifications that were deemed necessary. Based on this recommendation, the program committee, at the two-day online meeting, selected an initial set of papers for preliminary acceptance. The authors of these papers were given the opportunity to refine and resubmit their work. In the second round review, IPC members checked whether the changes made were sufficient to warrant final acceptance. Based on their input, paper chairs made the final decisions for papers appearing in the TVCG issue. The IEEE VR scientific program also includes 13 short papers published in a separate report.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The IEEE Virtual Reality (VR) 2013 full papers program, contained herein, includes 21 papers that present research, applications, and systems in the field of virtual reality. They were selected from 97 full paper submissions by an international program committee of 64 members, supported by 225 external expert reviewers, leading to an acceptance rate for IEEE Virtual Reality 2013 of 21.6%. All papers appearing in this issue have undergone a two-round review process. In the first round review, at least four expert reviewers reviewed the work. The paper chairs selected the primary and secondary reviewers from the international program committee, and the primary reviewer then recruited at least two external experts. After completion of all reviews, the primary reviewer led an online discussion phase, which resulted in an initial recommendation for acceptance or rejection and a set of modifications that were deemed necessary. Based on this recommendation, the program committee, at the two-day online meeting, selected an initial set of papers for preliminary acceptance. The authors of these papers were given the opportunity to refine and resubmit their work. In the second round review, IPC members checked whether the changes made were sufficient to warrant final acceptance. Based on their input, paper chairs made the final decisions for papers appearing in the TVCG issue. The IEEE VR scientific program also includes 13 short papers published in a separate report.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The IEEE Virtual Reality (VR) 2013 full papers program, contained herein, includes 21 papers that present research, applications, and systems in the field of virtual reality. They were selected from 97 full paper submissions by an international program committee of 64 members, supported by 225 external expert reviewers, leading to an acceptance rate for IEEE Virtual Reality 2013 of 21.6%. All papers appearing in this issue have undergone a two-round review process. In the first round review, at least four expert reviewers reviewed the work. The paper chairs selected the primary and secondary reviewers from the international program committee, and the primary reviewer then recruited at least two external experts. After completion of all reviews, the primary reviewer led an online discussion phase, which resulted in an initial recommendation for acceptance or rejection and a set of modifications that were deemed necessary. Based on this recommendation, the program committee, at the two-day online meeting, selected an initial set of papers for preliminary acceptance. The authors of these papers were given the opportunity to refine and resubmit their work. In the second round review, IPC members checked whether the changes made were sufficient to warrant final acceptance. Based on their input, paper chairs made the final decisions for papers appearing in the TVCG issue. The IEEE VR scientific program also includes 13 short papers published in a separate report.",
"fno": "06549334",
"keywords": [],
"authors": [],
"idPrefix": "vr",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2013-03-01T00:00:00",
"pubType": "proceedings",
"pages": "xii-xii",
"year": "2013",
"issn": "1087-8270",
"isbn": "978-1-4673-4795-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06549333",
"articleId": "12OmNxYtubh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06549335",
"articleId": "12OmNzyGH9J",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iswc/2004/2186/1/01364675",
"title": "Message from the Program Chairs",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2004/01364675/12OmNAY79hH",
"parentPublication": {
"id": "proceedings/iswc/2004/2186/1",
"title": "Eighth International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnp/2013/1270/0/06733568",
"title": "Message from the technical program chairs",
"doi": null,
"abstractUrl": "/proceedings-article/icnp/2013/06733568/12OmNBKEynw",
"parentPublication": {
"id": "proceedings/icnp/2013/1270/0",
"title": "2013 21st IEEE International Conference on Network Protocols (ICNP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2011/1101/0/06126560",
"title": "Message from Program Chairs",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2011/06126560/12OmNzAFSXb",
"parentPublication": {
"id": "proceedings/iccv/2011/1101/0",
"title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2012/4905/0/4905z017",
"title": "Message from Program Co-chairs",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2012/4905z017/12OmNzAohZL",
"parentPublication": {
"id": "proceedings/icdm/2012/4905/0",
"title": "2012 IEEE 12th International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cyberc/2012/4810/0/4810z013",
"title": "Message from the Technical Program Chairs",
"doi": null,
"abstractUrl": "/proceedings-article/cyberc/2012/4810z013/12OmNzEVRYD",
"parentPublication": {
"id": "proceedings/cyberc/2012/4810/0",
"title": "2012 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccgrid/2022/9956/0/995600z023",
"title": "Message from the Program Chairs",
"doi": null,
"abstractUrl": "/proceedings-article/ccgrid/2022/995600z023/1F8ze00bK2A",
"parentPublication": {
"id": "proceedings/ccgrid/2022/9956/0",
"title": "2022 22nd International Symposium on Cluster, Cloud and Internet Computing (CCGrid)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sp/2020/3497/0/349700z023",
"title": "Message from the Program Chairs: SP 2020",
"doi": null,
"abstractUrl": "/proceedings-article/sp/2020/349700z023/1lshgpvuzS0",
"parentPublication": {
"id": "proceedings/sp/2020/3497/0/",
"title": "2020 IEEE Symposium on Security and Privacy (SP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccgrid/2020/6095/0/09139662",
"title": "Message from the Program Chairs",
"doi": null,
"abstractUrl": "/proceedings-article/ccgrid/2020/09139662/1lsstFYguOc",
"parentPublication": {
"id": "proceedings/ccgrid/2020/6095/0",
"title": "2020 20th IEEE/ACM International Symposium on Cluster, Cloud and Internet Computing (CCGRID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/re/2020/7438/0/09218215",
"title": "Message from the Chairs",
"doi": null,
"abstractUrl": "/proceedings-article/re/2020/09218215/1nMQwYOmcIU",
"parentPublication": {
"id": "proceedings/re/2020/7438/0",
"title": "2020 IEEE 28th International Requirements Engineering Conference (RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900z166",
"title": "Message from the General and Program Chairs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900z166/1yeJnpNlo0o",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAOsMGx",
"title": "Fifth Annual Conference on AI, and Planning in High Autonomy Systems",
"acronym": "aihas",
"groupId": "1000028",
"volume": "0",
"displayVolume": "0",
"year": "1994",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC4wtKv",
"doi": "10.1109/AIHAS.1994.390502",
"title": "Fluids in a distributed interactive simulation",
"normalizedTitle": "Fluids in a distributed interactive simulation",
"abstract": "Today's training simulators have dealt mainly with vehicle dynamics, artillery dynamics and soil manipulations. Important features such as fluid surface effects and flow over a terrain surface have been neglected decreasing the realism of the simulation. The modeling and animation of fluids have recently been pursued vigorously in computer graphics but fluid in a real-time networked virtual environment has not been studied. This paper investigates issues concerning the implementation of fluids in a distributed interactive simulation. Several fluid models and a player/ghost simulation strategy are examined.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Today's training simulators have dealt mainly with vehicle dynamics, artillery dynamics and soil manipulations. Important features such as fluid surface effects and flow over a terrain surface have been neglected decreasing the realism of the simulation. The modeling and animation of fluids have recently been pursued vigorously in computer graphics but fluid in a real-time networked virtual environment has not been studied. This paper investigates issues concerning the implementation of fluids in a distributed interactive simulation. Several fluid models and a player/ghost simulation strategy are examined.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Today's training simulators have dealt mainly with vehicle dynamics, artillery dynamics and soil manipulations. Important features such as fluid surface effects and flow over a terrain surface have been neglected decreasing the realism of the simulation. The modeling and animation of fluids have recently been pursued vigorously in computer graphics but fluid in a real-time networked virtual environment has not been studied. This paper investigates issues concerning the implementation of fluids in a distributed interactive simulation. Several fluid models and a player/ghost simulation strategy are examined.",
"fno": "00390502",
"keywords": [
"Virtual Reality",
"Computer Animation",
"Military Computing",
"Simulation",
"Digital Simulation",
"Real Time Systems",
"Interactive Systems",
"Training",
"Distributed Interactive Simulation",
"Training Simulators",
"Fluid Surface Effects",
"Terrain Surface",
"Animation",
"Fluid Modeling",
"Computer Graphics",
"Real Time Networked Virtual Environment",
"Player Ghost Simulation",
"Computational Modeling",
"Vehicle Dynamics",
"Fluid Dynamics",
"Weapons",
"Floods",
"Land Vehicles",
"Road Vehicles",
"Manipulator Dynamics",
"Soil",
"Animation"
],
"authors": [
{
"affiliation": "Inst. for Simul. and Training, Central Florida Univ., Orlando, FL, USA",
"fullName": "Chen Jinxiong",
"givenName": null,
"surname": "Chen Jinxiong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "M. Sartor",
"givenName": "M.",
"surname": "Sartor",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aihas",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1994-01-01T00:00:00",
"pubType": "proceedings",
"pages": "43,44,45",
"year": "1994",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00390501",
"articleId": "12OmNzVoBAo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00390503",
"articleId": "12OmNyywxAz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/viz/2009/3734/0/3734a076",
"title": "VOF Method for Fluids and Solids on Octree Structure",
"doi": null,
"abstractUrl": "/proceedings-article/viz/2009/3734a076/12OmNBkxspY",
"parentPublication": {
"id": "proceedings/viz/2009/3734/0",
"title": "Visualisation, International Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a118",
"title": "Anisotropic Surface Reconstruction for Multiphase Fluids",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a118/12OmNCmpcVe",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720358",
"title": "Interactive Simulation and Visualization of Fluids with Surface Raycasting",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720358/12OmNwAKCLk",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a694",
"title": "Dynamic Fluids Mixed with Local-Control Effects",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a694/12OmNwqft4O",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdp/1999/0059/0/00590273",
"title": "Parallel Simulation of Incompressible Fluids",
"doi": null,
"abstractUrl": "/proceedings-article/pdp/1999/00590273/12OmNxeuteu",
"parentPublication": {
"id": "proceedings/pdp/1999/0059/0",
"title": "Seventh Euromicro Workshop on Parallel and Distributed Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1995/7062/0/70620198",
"title": "Dynamic simulation of splashing fluids",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1995/70620198/12OmNy1SFHC",
"parentPublication": {
"id": "proceedings/ca/1995/7062/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010147",
"title": "Virtual Rheoscopic Fluids",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010147/13rRUwgQpDr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/11/ttg2011111714",
"title": "Six Degrees-of-Freedom Haptic Interaction with Fluids",
"doi": null,
"abstractUrl": "/journal/tg/2011/11/ttg2011111714/13rRUxNW1Zj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2007/03/mcg2007030090",
"title": "A Precomputed Approach for Real-Time Haptic Interaction with Fluids",
"doi": null,
"abstractUrl": "/magazine/cg/2007/03/mcg2007030090/13rRUxjyX6p",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1997/03/mcg1997030052",
"title": "Real-Time Fluid Simulation in a Dynamic Virtual Environment",
"doi": null,
"abstractUrl": "/magazine/cg/1997/03/mcg1997030052/13rRUyXKxU3",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCbCrV8",
"title": "Convergence Information Technology, International Conference on",
"acronym": "iccit",
"groupId": "1003018",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvq5jFi",
"doi": "10.1109/ICCIT.2009.148",
"title": "Performance-Oriented Drilling Fluids Design System with a Neural Network Approach",
"normalizedTitle": "Performance-Oriented Drilling Fluids Design System with a Neural Network Approach",
"abstract": "Drilling fluids play a key role in the minimization of well bore problems when drilling oil or gas wells, usually the design of drilling fluids is depended on many experiments with experience. Rule-based and case-based reasoning drilling fluid system was designed with theory of expert system by some researchers. But it is very difficult to get to know and express precious relationship between drilling fluid formulation and its performance. Performance of drilling fluids can be measured with test device when drilling fluid is ready. A performance oriented drilling fluids design system is presented, with supervised artificial neural network algorithm to acquire knowledge by learning from experimental data. The system can be used to design drilling fluid according to specified performance. Experimental results show that drilling fluids designed by the system can satisfy specified performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Drilling fluids play a key role in the minimization of well bore problems when drilling oil or gas wells, usually the design of drilling fluids is depended on many experiments with experience. Rule-based and case-based reasoning drilling fluid system was designed with theory of expert system by some researchers. But it is very difficult to get to know and express precious relationship between drilling fluid formulation and its performance. Performance of drilling fluids can be measured with test device when drilling fluid is ready. A performance oriented drilling fluids design system is presented, with supervised artificial neural network algorithm to acquire knowledge by learning from experimental data. The system can be used to design drilling fluid according to specified performance. Experimental results show that drilling fluids designed by the system can satisfy specified performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Drilling fluids play a key role in the minimization of well bore problems when drilling oil or gas wells, usually the design of drilling fluids is depended on many experiments with experience. Rule-based and case-based reasoning drilling fluid system was designed with theory of expert system by some researchers. But it is very difficult to get to know and express precious relationship between drilling fluid formulation and its performance. Performance of drilling fluids can be measured with test device when drilling fluid is ready. A performance oriented drilling fluids design system is presented, with supervised artificial neural network algorithm to acquire knowledge by learning from experimental data. The system can be used to design drilling fluid according to specified performance. Experimental results show that drilling fluids designed by the system can satisfy specified performance.",
"fno": "3896b280",
"keywords": [
"Drilling Fluid Performance Oriented Artificial Neural Network"
],
"authors": [
{
"affiliation": null,
"fullName": "Yongbin Zhang",
"givenName": "Yongbin",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yeli Li",
"givenName": "Yeli",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Peng Cao",
"givenName": "Peng",
"surname": "Cao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccit",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1280-1283",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3896-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3896b276",
"articleId": "12OmNAYXWvh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3896b284",
"articleId": "12OmNqBtiOR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iwcse/2009/3881/2/3881b494",
"title": "Study and Implement of Downward Communication Function in Rotary Steerable Drilling System",
"doi": null,
"abstractUrl": "/proceedings-article/iwcse/2009/3881b494/12OmNBO3K3S",
"parentPublication": {
"id": "proceedings/iwcse/2009/3881/2",
"title": "Computer Science and Engineering, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aihas/1994/6440/0/00390502",
"title": "Fluids in a distributed interactive simulation",
"doi": null,
"abstractUrl": "/proceedings-article/aihas/1994/00390502/12OmNC4wtKv",
"parentPublication": {
"id": "proceedings/aihas/1994/6440/0",
"title": "Fifth Annual Conference on AI, and Planning in High Autonomy Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iita/2009/3859/1/3859a056",
"title": "Application on Lithology Recognition with BP Artificial Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/iita/2009/3859a056/12OmNwtWfEZ",
"parentPublication": {
"id": "proceedings/iita/2009/3859/1",
"title": "2009 Third International Symposium on Intelligent Information Technology Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2009/3583/1/3583a732",
"title": "Study on Torsional Vibration Performance of Twist Drill in Axial Vibration Drilling",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583a732/12OmNzlD9eR",
"parentPublication": {
"id": "proceedings/icmtma/2009/3583/3",
"title": "2009 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/11/ttg2011111714",
"title": "Six Degrees-of-Freedom Haptic Interaction with Fluids",
"doi": null,
"abstractUrl": "/journal/tg/2011/11/ttg2011111714/13rRUxNW1Zj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010004",
"title": "Multiphase Flow of Immiscible Fluids on Unstructured Moving Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010004/13rRUxcbnCr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apct/2022/8345/0/834500a082",
"title": "Information Processing and Designing of Gas Well Drilling Fluids",
"doi": null,
"abstractUrl": "/proceedings-article/apct/2022/834500a082/1FAMqq5BTrO",
"parentPublication": {
"id": "proceedings/apct/2022/8345/0",
"title": "2022 Asia-Pacific Computer Technologies Conference (APCT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icapc/2022/6303/0/630300a137",
"title": "New Leakage Dynamics Model Based on Thermal-Hydraulic-Mechanical Coupling Theory",
"doi": null,
"abstractUrl": "/proceedings-article/icapc/2022/630300a137/1M7KW4VSPhS",
"parentPublication": {
"id": "proceedings/icapc/2022/6303/0",
"title": "2022 International Conference on Applied Physics and Computing (ICAPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/edge/2020/8254/0/825400a111",
"title": "Camera-Based Edge Analytics for Drilling Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/edge/2020/825400a111/1pDriDasjwk",
"parentPublication": {
"id": "proceedings/edge/2020/8254/0",
"title": "2020 IEEE International Conference on Edge Computing (EDGE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2021/4486/0/448600a294",
"title": "Stable Platform Research on Control Method of Fully Rotary Steerable Drilling Tool",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2021/448600a294/1yEZiVncMuc",
"parentPublication": {
"id": "proceedings/iccnea/2021/4486/0",
"title": "2021 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzWfp8s",
"title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)",
"acronym": "svr",
"groupId": "1800426",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvs4vmP",
"doi": "10.1109/SVR.2017.47",
"title": "Screen Space Rendering Solution for Multiphase SPH Simulation",
"normalizedTitle": "Screen Space Rendering Solution for Multiphase SPH Simulation",
"abstract": "Fluid simulation using meshless methods has increasingly become a robust way to solve mechanics problems that require dealing with large deformations, and has become very popular in many applications such as naval engineering, mechanical engineering, movies and games. One of the main methods is the Smoothed Particle Hydrodynamics (SPH). This work has two main goals: to propose a multiphase SPH formulation by extending the work of Silva et al. and to propose a shader based render solution for this kind of simulation. The proposed SPH method was able to simulate multiphase fluids with up to one million particles and the renderer was able to generate visually plausible results up to 60fps.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Fluid simulation using meshless methods has increasingly become a robust way to solve mechanics problems that require dealing with large deformations, and has become very popular in many applications such as naval engineering, mechanical engineering, movies and games. One of the main methods is the Smoothed Particle Hydrodynamics (SPH). This work has two main goals: to propose a multiphase SPH formulation by extending the work of Silva et al. and to propose a shader based render solution for this kind of simulation. The proposed SPH method was able to simulate multiphase fluids with up to one million particles and the renderer was able to generate visually plausible results up to 60fps.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Fluid simulation using meshless methods has increasingly become a robust way to solve mechanics problems that require dealing with large deformations, and has become very popular in many applications such as naval engineering, mechanical engineering, movies and games. One of the main methods is the Smoothed Particle Hydrodynamics (SPH). This work has two main goals: to propose a multiphase SPH formulation by extending the work of Silva et al. and to propose a shader based render solution for this kind of simulation. The proposed SPH method was able to simulate multiphase fluids with up to one million particles and the renderer was able to generate visually plausible results up to 60fps.",
"fno": "3588a309",
"keywords": [
"Computational Fluid Dynamics",
"Rendering Computer Graphics",
"Smoothed Particle Hydrodynamics",
"Shader Based Render Solution",
"Multiphase Fluids",
"Screen Space Rendering Solution",
"Multiphase SPH Simulation",
"Fluid Simulation",
"Meshless Methods",
"Mechanics Problems",
"Deformations",
"Fluid Dynamics",
"Rendering Computer Graphics",
"Surface Reconstruction",
"Solid Modeling",
"Mathematical Model",
"Deformable Models",
"Viscosity",
"SPH",
"Multiphase",
"Rendering",
"Screen Space"
],
"authors": [
{
"affiliation": null,
"fullName": "Caio José dos Santos Brito",
"givenName": "Caio José dos Santos",
"surname": "Brito",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mozart William Santos Almeida",
"givenName": "Mozart William Santos",
"surname": "Almeida",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "André Luiz Buarque Vieira-e-Silva",
"givenName": "André Luiz Buarque",
"surname": "Vieira-e-Silva",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "João Marcelo Xavier Natario Teixeira",
"givenName": "João Marcelo Xavier Natario",
"surname": "Teixeira",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Veronica Teichrieb",
"givenName": "Veronica",
"surname": "Teichrieb",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "svr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-11-01T00:00:00",
"pubType": "proceedings",
"pages": "309-318",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-3588-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3588a297",
"articleId": "12OmNyS6RH4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3588a319",
"articleId": "12OmNwErpMd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2014/6854/0/6854a372",
"title": "Parameterized Rendering for Multiresolution Terrain Structure",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2014/6854a372/12OmNCdBDGL",
"parentPublication": {
"id": "proceedings/icvrv/2014/6854/0",
"title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a118",
"title": "Anisotropic Surface Reconstruction for Multiphase Fluids",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a118/12OmNCmpcVe",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscmi/2016/3696/0/3696a184",
"title": "Realtime Haptic Rendering in Hybrid Environment Using Unified SPH Method",
"doi": null,
"abstractUrl": "/proceedings-article/iscmi/2016/3696a184/12OmNxUdv6m",
"parentPublication": {
"id": "proceedings/iscmi/2016/3696/0",
"title": "2016 3rd International Conference on Soft Computing & Machine Intelligence (ISCMI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459417",
"title": "Piecewise planar stereo for image-based rendering",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459417/12OmNz61d3q",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2012/4829/0/4829a126",
"title": "Efficient HPR-Based Rendering of Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2012/4829a126/12OmNzIUfWI",
"parentPublication": {
"id": "proceedings/sibgrapi/2012/4829/0",
"title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061533",
"title": "Efficient High-Quality Volume Rendering of SPH Data",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061533/13rRUwInvJc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/08/07239614",
"title": "Multiphase Interface Tracking with Fast Semi-Lagrangian Contouring",
"doi": null,
"abstractUrl": "/journal/tg/2016/08/07239614/13rRUwInvfd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/02/07829433",
"title": "Memory-Efficient On-the-Fly Voxelization and Rendering of Particle Data",
"doi": null,
"abstractUrl": "/journal/tg/2018/02/07829433/13rRUygBwhM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g177",
"title": "Multi-View Mesh Reconstruction with Neural Deferred Shading",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g177/1H0NScvhUC4",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09999345",
"title": "Polarimetric Multi-View Inverse Rendering",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09999345/1JqCybj0DBu",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxA3Z3U",
"title": "Computer Animation",
"acronym": "ca",
"groupId": "1000121",
"volume": "0",
"displayVolume": "0",
"year": "1995",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy1SFHC",
"doi": "10.1109/CA.1995.393532",
"title": "Dynamic simulation of splashing fluids",
"normalizedTitle": "Dynamic simulation of splashing fluids",
"abstract": "We describe a method for modeling the dynamic behavior of splashing fluids. The model simulates the behavior of a fluid when objects impact or float on its surface. The forces generated by the objects create waves and splashes on the surface of the fluid. To demonstrate the realism and limitations of the model, images from a computer-generated animation are presented and compared with video frames of actual splashes occurring under similar initial conditions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We describe a method for modeling the dynamic behavior of splashing fluids. The model simulates the behavior of a fluid when objects impact or float on its surface. The forces generated by the objects create waves and splashes on the surface of the fluid. To demonstrate the realism and limitations of the model, images from a computer-generated animation are presented and compared with video frames of actual splashes occurring under similar initial conditions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We describe a method for modeling the dynamic behavior of splashing fluids. The model simulates the behavior of a fluid when objects impact or float on its surface. The forces generated by the objects create waves and splashes on the surface of the fluid. To demonstrate the realism and limitations of the model, images from a computer-generated animation are presented and compared with video frames of actual splashes occurring under similar initial conditions.",
"fno": "70620198",
"keywords": [
"Computer Animation Digital Simulation Fluids Physics Computing Dynamic Simulation Splashing Fluid Simulation Dynamic Behavior Waves Splashes Computer Generated Animation Video Frames"
],
"authors": [
{
"affiliation": "Coll. of Comput., Georgia Inst. of Technol., Atlanta, GA, USA",
"fullName": "J.F. O'Brien",
"givenName": "J.F.",
"surname": "O'Brien",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coll. of Comput., Georgia Inst. of Technol., Atlanta, GA, USA",
"fullName": "J.K. Hodgins",
"givenName": "J.K.",
"surname": "Hodgins",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ca",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1995-04-01T00:00:00",
"pubType": "proceedings",
"pages": "198",
"year": "1995",
"issn": null,
"isbn": "0-8186-7062-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "70620190",
"articleId": "12OmNzsrwdD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "70620207",
"articleId": "12OmNBpVPY1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.