data dict |
|---|
{
"proceeding": {
"id": "12OmNwp74rq",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "1993",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAo45On",
"doi": "10.1109/CVPR.1993.340997",
"title": "Three-dimensional shape reconstruction by active rangefinder",
"normalizedTitle": "Three-dimensional shape reconstruction by active rangefinder",
"abstract": "A new type of rangefinder is proposed, and a method for 3-D object shape reconstruction is described. The proposed rangefinder is compact and light and consists of a charge-coupled device camera, a galvano mirror, and a semiconductor laser. A slit-ray from the laser is scanned by the mirror and patterns of light are made by temporal switching of the laser. Range maps are obtained by the space encoded technique. A 512/spl times/256 range map is captured within 0.3 s. The rangefinder is installed at a tip of a robot manipulator, and takes range maps from various directions. Experimental results are described for range map integration and 3-D shape reconstruction.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "A new type of rangefinder is proposed, and a method for 3-D object shape reconstruction is described. The proposed rangefinder is compact and light and consists of a charge-coupled device camera, a galvano mirror, and a semiconductor laser. A slit-ray from the laser is scanned by the mirror and patterns of light are made by temporal switching of the laser. Range maps are obtained by the space encoded technique. A 512/spl times/256 range map is captured within 0.3 s. The rangefinder is installed at a tip of a robot manipulator, and takes range maps from various directions. Experimental results are described for range map integration and 3-D shape reconstruction.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A new type of rangefinder is proposed, and a method for 3-D object shape reconstruction is described. The proposed rangefinder is compact and light and consists of a charge-coupled device camera, a galvano mirror, and a semiconductor laser. A slit-ray from the laser is scanned by the mirror and patterns of light are made by temporal switching of the laser. Range maps are obtained by the space encoded technique. A 512/spl times/256 range map is captured within 0.3 s. The rangefinder is installed at a tip of a robot manipulator, and takes range maps from various directions. Experimental results are described for range map integration and 3-D shape reconstruction.",
"fno": "00340997",
"keywords": [
"Laser Ranging",
"Image Reconstruction",
"CCD Image Sensors",
"Manipulators",
"Computer Vision",
"Active Rangefinder",
"3 D Object Shape Reconstruction",
"Charge Coupled Device Camera",
"Galvano Mirror",
"Semiconductor Laser",
"Slit Ray",
"Temporal Switching",
"512 Spl Times 256 Range Map",
"Robot Manipulator",
"Range Maps",
"0 3 S",
"Shape",
"Cameras",
"Image Reconstruction",
"Robot Vision Systems",
"Layout",
"Image Coding",
"Semiconductor Lasers",
"Charge Coupled Devices",
"Charge Coupled Image Sensors",
"Mirrors"
],
"authors": [
{
"affiliation": "Dept. of Electr. & Comput. Eng., Nagoya Inst. of Technol., Japan",
"fullName": "Y. Sato",
"givenName": "Y.",
"surname": "Sato",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. & Comput. Eng., Nagoya Inst. of Technol., Japan",
"fullName": "M. Otsuki",
"givenName": "M.",
"surname": "Otsuki",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1993-01-01T00:00:00",
"pubType": "proceedings",
"pages": "142-147",
"year": "1993",
"issn": "1063-6919",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00340996",
"articleId": "12OmNAle6oQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00340998",
"articleId": "12OmNro0I1E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/visapp/2014/8133/3/07295136",
"title": "Three-dimensional visual reconstruction of path shape using a cart with a laser scanner",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295136/12OmNAlvHyp",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012311",
"title": "Laser triangulation range finder available under direct sunlight",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012311/12OmNBWi6Ki",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567323",
"title": "Phase spatial structures in lasers with bichromatical injected signal",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567323/12OmNBpVQ1d",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131796",
"title": "A very fast VLSI rangefinder",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131796/12OmNqIQSjT",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cca/2000/6562/0/00897483",
"title": "Development of a continuous scanning laser Doppler vibrometer for vibration mode shape analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cca/2000/00897483/12OmNweBUDb",
"parentPublication": {
"id": "proceedings/cca/2000/6562/0",
"title": "Proceedings of the 2000 IEEE International Conference on Control Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607656",
"title": "Joint calibration of a camera triplet and a laser rangefinder",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607656/12OmNxdm4Cx",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iros/1995/7108/3/71083362",
"title": "Wrist-mounted laser rangefinder",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71083362/12OmNy4IEZ2",
"parentPublication": {
"id": "proceedings/iros/1995/7108/3",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdc/2000/6638/3/00914181",
"title": "Three dimensional structure estimation and planning with vision and range",
"doi": null,
"abstractUrl": "/proceedings-article/cdc/2000/00914181/12OmNzwHvux",
"parentPublication": {
"id": "proceedings/cdc/2000/6638/3",
"title": "Proceedings of the 39th IEEE Conference on Decision and Control",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798009",
"title": "Laser-based Photochromic Drawing Method for Rotating Objects with High-speed Visual Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798009/1cJ1a4b7cfS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/1997/7943/0/00603885",
"title": "Fusing and guiding range measurements with colour video images",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/1997/00603885/1fHGz5Hyq7C",
"parentPublication": {
"id": "proceedings/3dim/1997/7943/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzSh1aC",
"title": "2012 Third International Conference on Digital Manufacturing & Automation",
"acronym": "icdma",
"groupId": "1800272",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBtl1GA",
"doi": "10.1109/ICDMA.2012.228",
"title": "Three-Dimensional Stability Lobes of Milling Thin-Walled Plate with Different Cutter Geometry Parameters",
"normalizedTitle": "Three-Dimensional Stability Lobes of Milling Thin-Walled Plate with Different Cutter Geometry Parameters",
"abstract": "The milling of thin-walled plate has become a seriously complex problem. Along the machining of this type of structure, large quantities of material are removed with the risk of the instability of the process. This paper studies the effect of helix and normal rake angles on milling stability by analyzing the geometrical relationship of oblique cutting, and obtains the mathematic relationship expressions between the cutter parameters and chatter. Moreover, this paper derives the three-dimensional stability lobes of the spindle speed, axial and radial depths under the condition of different helix angle, normal rake angles and cutter tooth. Through the stability lobes, it can be found that the milling stability is increased with the increment of helix and normal rake angles.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The milling of thin-walled plate has become a seriously complex problem. Along the machining of this type of structure, large quantities of material are removed with the risk of the instability of the process. This paper studies the effect of helix and normal rake angles on milling stability by analyzing the geometrical relationship of oblique cutting, and obtains the mathematic relationship expressions between the cutter parameters and chatter. Moreover, this paper derives the three-dimensional stability lobes of the spindle speed, axial and radial depths under the condition of different helix angle, normal rake angles and cutter tooth. Through the stability lobes, it can be found that the milling stability is increased with the increment of helix and normal rake angles.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The milling of thin-walled plate has become a seriously complex problem. Along the machining of this type of structure, large quantities of material are removed with the risk of the instability of the process. This paper studies the effect of helix and normal rake angles on milling stability by analyzing the geometrical relationship of oblique cutting, and obtains the mathematic relationship expressions between the cutter parameters and chatter. Moreover, this paper derives the three-dimensional stability lobes of the spindle speed, axial and radial depths under the condition of different helix angle, normal rake angles and cutter tooth. Through the stability lobes, it can be found that the milling stability is increased with the increment of helix and normal rake angles.",
"fno": "4772a975",
"keywords": [
"Stability Analysis",
"Milling",
"Mathematical Model",
"Force",
"Equations",
"Geometry",
"Face",
"Three Dimensional Stability",
"Oblique Cutting",
"Helix Angle",
"Normal Rake Angle"
],
"authors": [
{
"affiliation": null,
"fullName": "Tang Aijun",
"givenName": "Tang",
"surname": "Aijun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Liu Zhanqiang",
"givenName": "Liu",
"surname": "Zhanqiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ma Hailong",
"givenName": "Ma",
"surname": "Hailong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdma",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-07-01T00:00:00",
"pubType": "proceedings",
"pages": "975-978",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-2217-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4772a972",
"articleId": "12OmNAsTgSQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4772a979",
"articleId": "12OmNqOwQH7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2010/3962/1/3962a878",
"title": "Cutting Conditions Optimization of Circular Milling Based on Dynamic Modeling and Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962a878/12OmNAXxWWL",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/1",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2009/3583/2/3583b204",
"title": "Chatter Stability Limits Simulation and Experimental Research on Medium and Low Speed Peripheral Milling",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583b204/12OmNAq3hwv",
"parentPublication": {
"id": "proceedings/icmtma/2009/3583/2",
"title": "2009 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icscse/2017/1401/0/1401a023",
"title": "Retracted: Machining Deformation Control and Compensation Using Whole Mirror Milling Method for Tank Thin-Walled Parts",
"doi": null,
"abstractUrl": "/proceedings-article/icscse/2017/1401a023/12OmNvT2oMN",
"parentPublication": {
"id": "proceedings/icscse/2017/1401/0",
"title": "2017 International Conference on Smart City and Systems Engineering (ICSCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2011/4296/2/4296c922",
"title": "Process Planning and Simulation Strategies for Perimeter Milling of Thin-walled Flexible Parts Held by Reconfigurable Fixturing System",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2011/4296c922/12OmNvxKu1s",
"parentPublication": {
"id": "proceedings/icmtma/2011/4296/2",
"title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2010/4077/3/4077e501",
"title": "Study on Dynamic Simulation and Cutting Parameters Optimization on Complex Cutting Conditions Milling Process",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2010/4077e501/12OmNwDAClL",
"parentPublication": {
"id": "proceedings/icicta/2010/4077/3",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2021/2172/0/217200a062",
"title": "Active Vibration Control of Thin Plate Milling Using Piezoelectric Actuator",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2021/217200a062/1ANLDTny8la",
"parentPublication": {
"id": "proceedings/wcmeim/2021/2172/0",
"title": "2021 4th World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictech/2022/9694/0/969400a476",
"title": "Research and Application of Wear Prediction Method of NC Milling Cutter Based on Data-Driven",
"doi": null,
"abstractUrl": "/proceedings-article/ictech/2022/969400a476/1FWmpMSoNi0",
"parentPublication": {
"id": "proceedings/ictech/2022/9694/0",
"title": "2022 11th International Conference of Information and Communication Technology (ICTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378223",
"title": "Intelligent Chatter Detection in Milling using Vibration Data Features and Deep Multi-Layer Perceptron",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378223/1s64duwMCTm",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2020/4109/0/410900a175",
"title": "Review of research on chatter stability in milling of thin-walled parts",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2020/410900a175/1t2myNzoq3e",
"parentPublication": {
"id": "proceedings/wcmeim/2020/4109/0",
"title": "2020 3rd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvA1hw4",
"title": "Proceedings. 20th International Conference on Data Engineering",
"acronym": "icde",
"groupId": "1000178",
"volume": "0",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBtl1xg",
"doi": "10.1109/ICDE.2004.1320043",
"title": "Content-based Three-dimensional Engineering Shape Search",
"normalizedTitle": "Content-based Three-dimensional Engineering Shape Search",
"abstract": "In this paper, we discuss the design and implementation of a prototype 3D Engineering Shape Search system. The system incorporates multiple feature vectors, relevance feedback, and query by example and browsing, flexible definition of shape similarity, and efficient execution through multi-dimensional indexing and clustering. In order to offer more information for a user to determine similarity of 3D engineering shape, a 3D interface that allows users to manipulate shapes is proposed and implemented to present the search results. The system allows users to specify which feature vectors should be used to perform the search. The system is used to conduct extensive experimentation real data to test the effectiveness of various feature vectors for shape - the first such comparison of this type. The test results show that the descending order of the average precision of feature vectors is: principal moments, moment invariants, geometric parameters, and eigenvalues. In addition, a multi-step similarity search strategy is proposed and tested in this paper to improve the effectiveness of 3D engineering shape search. It is shown that the multi-step approach is more effective than the one-shot search approach, when a fixed number of shapes are retrieved.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we discuss the design and implementation of a prototype 3D Engineering Shape Search system. The system incorporates multiple feature vectors, relevance feedback, and query by example and browsing, flexible definition of shape similarity, and efficient execution through multi-dimensional indexing and clustering. In order to offer more information for a user to determine similarity of 3D engineering shape, a 3D interface that allows users to manipulate shapes is proposed and implemented to present the search results. The system allows users to specify which feature vectors should be used to perform the search. The system is used to conduct extensive experimentation real data to test the effectiveness of various feature vectors for shape - the first such comparison of this type. The test results show that the descending order of the average precision of feature vectors is: principal moments, moment invariants, geometric parameters, and eigenvalues. In addition, a multi-step similarity search strategy is proposed and tested in this paper to improve the effectiveness of 3D engineering shape search. It is shown that the multi-step approach is more effective than the one-shot search approach, when a fixed number of shapes are retrieved.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we discuss the design and implementation of a prototype 3D Engineering Shape Search system. The system incorporates multiple feature vectors, relevance feedback, and query by example and browsing, flexible definition of shape similarity, and efficient execution through multi-dimensional indexing and clustering. In order to offer more information for a user to determine similarity of 3D engineering shape, a 3D interface that allows users to manipulate shapes is proposed and implemented to present the search results. The system allows users to specify which feature vectors should be used to perform the search. The system is used to conduct extensive experimentation real data to test the effectiveness of various feature vectors for shape - the first such comparison of this type. The test results show that the descending order of the average precision of feature vectors is: principal moments, moment invariants, geometric parameters, and eigenvalues. In addition, a multi-step similarity search strategy is proposed and tested in this paper to improve the effectiveness of 3D engineering shape search. It is shown that the multi-step approach is more effective than the one-shot search approach, when a fixed number of shapes are retrieved.",
"fno": "20650754",
"keywords": [],
"authors": [
{
"affiliation": "Purdue University, West Lafayette, IN",
"fullName": "K. Lou",
"givenName": "K.",
"surname": "Lou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University, West Lafayette, IN",
"fullName": "S. Prabhakar",
"givenName": "S.",
"surname": "Prabhakar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University, West Lafayette, IN",
"fullName": "K. Ramani",
"givenName": "K.",
"surname": "Ramani",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icde",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-03-01T00:00:00",
"pubType": "proceedings",
"pages": "754",
"year": "2004",
"issn": "1063-6382",
"isbn": "0-7695-2065-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "20650745",
"articleId": "12OmNyPQ4P0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "20650766",
"articleId": "12OmNAJVcDd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pg/2002/1784/0/17840265",
"title": "Shape-Similarity Search of Three-Dimensional Models Using Parameterized Statistics",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2002/17840265/12OmNAlvHMg",
"parentPublication": {
"id": "proceedings/pg/2002/1784/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2008/3131/0/3131a208",
"title": "Three Dimensional Face Recognition Using SVM Classifier",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2008/3131a208/12OmNBCZnRJ",
"parentPublication": {
"id": "proceedings/icis/2008/3131/0",
"title": "2008 7th IEEE/ACIS International Conference on Computer and Information Science (ICIS '08)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cinc/2009/3645/1/3645a186",
"title": "Shape Representation and Recognition in High Dimensional Feature Space",
"doi": null,
"abstractUrl": "/proceedings-article/cinc/2009/3645a186/12OmNBp52uC",
"parentPublication": {
"id": "proceedings/cinc/2009/3645/1",
"title": "Computational Intelligence and Natural Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/1/81831393",
"title": "Local three-dimensional shape-preserving smoothing without shrinkage",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831393/12OmNCfjeEl",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccima/2001/1312/0/13120266",
"title": "Content-Based Search for 3-D Objects",
"doi": null,
"abstractUrl": "/proceedings-article/iccima/2001/13120266/12OmNrY3LCf",
"parentPublication": {
"id": "proceedings/iccima/2001/1312/0",
"title": "Computational Intelligence and Multimedia Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851f023",
"title": "GIFT: A Real-Time and Scalable 3D Shape Search Engine",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851f023/12OmNwpoFFv",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tpcg/2003/1942/0/19420097",
"title": "Shape-Similarity Search of 3D Models by using Enhanced Shape Functions",
"doi": null,
"abstractUrl": "/proceedings-article/tpcg/2003/19420097/12OmNwvVryw",
"parentPublication": {
"id": "proceedings/tpcg/2003/1942/0",
"title": "Theory and Practice of Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ias/2009/3744/2/3744b015",
"title": "A Multi-view Nonlinear Active Shape Model Based on 3D Transformation Shape Search",
"doi": null,
"abstractUrl": "/proceedings-article/ias/2009/3744b015/12OmNyaXPU7",
"parentPublication": {
"id": "proceedings/ias/2009/3744/2",
"title": "Information Assurance and Security, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2006/03/n0193",
"title": "Three-Dimensional Shape-Structure Comparison Method for Protein Classification",
"doi": null,
"abstractUrl": "/journal/tb/2006/03/n0193/13rRUwbs2eX",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2016/05/07321808",
"title": "Similarity Search of Flexible 3D Molecules Combining Local and Global Shape Descriptors",
"doi": null,
"abstractUrl": "/journal/tb/2016/05/07321808/13rRUy2YLRy",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1lgop7Lmd4Q",
"title": "2009 International Joint Conference on Computational Sciences and Optimization",
"acronym": "cso",
"groupId": "1002829",
"volume": "1",
"displayVolume": "1",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxzMnYF",
"doi": "10.1109/CSO.2009.240",
"title": "Three-Dimensional Shape Recovery Based on Single Historical Image",
"normalizedTitle": "Three-Dimensional Shape Recovery Based on Single Historical Image",
"abstract": "For unable to establish geometrical relationship of coordinates, conventional methods of 3D shape recovery can not be adopted to a great number of precious historical images to recover the 3D shape of the objects and environments in the images. The new method of 3D shape recovery based on single frame historical image is proposed in this paper. The core idea of this research is that, according to the change of the shade of gray of each pixel, the tilt angle and slant angle of the surface vertical vector of each object's pixel in the image is analyzed, then the depth of each pixel is calculated in response to the tilt angle and slant angle. Simulating human being's vision system and the process of thinking of brain, this method makes the process of man's 3D comprehension of single image computerize to obtain 3D digitized data. These 3D digitized data can be used in the fields of the 3D recovery of historical relics and the identification of 3D portrait and stereo literal.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For unable to establish geometrical relationship of coordinates, conventional methods of 3D shape recovery can not be adopted to a great number of precious historical images to recover the 3D shape of the objects and environments in the images. The new method of 3D shape recovery based on single frame historical image is proposed in this paper. The core idea of this research is that, according to the change of the shade of gray of each pixel, the tilt angle and slant angle of the surface vertical vector of each object's pixel in the image is analyzed, then the depth of each pixel is calculated in response to the tilt angle and slant angle. Simulating human being's vision system and the process of thinking of brain, this method makes the process of man's 3D comprehension of single image computerize to obtain 3D digitized data. These 3D digitized data can be used in the fields of the 3D recovery of historical relics and the identification of 3D portrait and stereo literal.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For unable to establish geometrical relationship of coordinates, conventional methods of 3D shape recovery can not be adopted to a great number of precious historical images to recover the 3D shape of the objects and environments in the images. The new method of 3D shape recovery based on single frame historical image is proposed in this paper. The core idea of this research is that, according to the change of the shade of gray of each pixel, the tilt angle and slant angle of the surface vertical vector of each object's pixel in the image is analyzed, then the depth of each pixel is calculated in response to the tilt angle and slant angle. Simulating human being's vision system and the process of thinking of brain, this method makes the process of man's 3D comprehension of single image computerize to obtain 3D digitized data. These 3D digitized data can be used in the fields of the 3D recovery of historical relics and the identification of 3D portrait and stereo literal.",
"fno": "3605a420",
"keywords": [
"Image Reconstruction",
"Stereo Image Processing",
"Three Dimensional Shape Recovery",
"Single Historical Image",
"3 D Shape Recovery",
"Surface Vertical Vector",
"Object Pixel",
"Human Vision System Simulation",
"3 D Digitized Data",
"Pixel",
"Image Converters",
"Optical Imaging",
"Reflectivity",
"Shape Measurement",
"Educational Institutions",
"Image Analysis",
"Brain Modeling",
"Computational Modeling",
"Computer Simulation"
],
"authors": [
{
"affiliation": "Coll. of Comput. Technic & Automatization, TianJin Polytech. Univ., Tianjin, China",
"fullName": "Limei Song",
"givenName": "Limei",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yuhua Wen",
"givenName": "Yuhua",
"surname": "Wen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cso",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-04-01T00:00:00",
"pubType": "proceedings",
"pages": "420-422",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3605-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3605a417",
"articleId": "12OmNwLOYTl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3605a423",
"articleId": "12OmNBCHMMX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/1993/3880/0/00340970",
"title": "A qualitative approach to quantitative recovery of SHGCs shape and pose from shading and contour",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00340970/12OmNAfgwzh",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/2/73102354",
"title": "3-D shape recovery of hybrid reflectance surface using indirect diffuse illumination",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73102354/12OmNCyTysb",
"parentPublication": {
"id": "proceedings/icip/1995/7310/2",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2910/0/00201617",
"title": "Shape and source from shading using zero crossings",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00201617/12OmNrJ11xz",
"parentPublication": {
"id": "proceedings/icpr/1992/2910/0",
"title": "1992 11th IAPR International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cccm/2008/3290/1/3290a133",
"title": "Three-Dimensional Computational Integral Imaging Reconstruction by Use of Pixel Blocks Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/cccm/2008/3290a133/12OmNvStcA6",
"parentPublication": {
"id": "proceedings/cccm/2008/3290/1",
"title": "Computing, Communication, Control and Management, ISECS International Colloquium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicic/2006/2616/1/26160302",
"title": "Thin-Type Three-dimensional Display Based on the Reconstruction of Parallax Rays",
"doi": null,
"abstractUrl": "/proceedings-article/icicic/2006/26160302/12OmNxWuih1",
"parentPublication": {
"id": "proceedings/icicic/2006/2616/1",
"title": "Innovative Computing ,Information and Control, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761546",
"title": "An evaluation survey of binarization algorithms on historical documents",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761546/12OmNxaw5bY",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/2/00413543",
"title": "Recovery of 3-D shape using hybrid reflectance model",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413543/12OmNxxNbTz",
"parentPublication": {
"id": "proceedings/icip/1994/6952/2",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284896",
"title": "Hole Filling on Three-Dimensional Surface Texture",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284896/12OmNy4IF6j",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icic/2010/7081/4/05514049",
"title": "A Quickly Skew Correction Algorithm of Bill Image",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2010/05514049/12OmNyoSbef",
"parentPublication": {
"id": "proceedings/icic/2010/7081/4",
"title": "2010 Third International Conference on Information and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1995/03/i0266",
"title": "Accurate Recovery of Three-Dimensional Shape from Image Focus",
"doi": null,
"abstractUrl": "/journal/tp/1995/03/i0266/13rRUILtJrX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAkEU4h",
"title": "2006 IEEE/NLM Life Science Systems and Applications Workshop",
"acronym": "lssa",
"groupId": "1001257",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyQGS3f",
"doi": "10.1109/LSSA.2006.250433",
"title": "Multi-Modal Microscope for Biomedical Research",
"normalizedTitle": "Multi-Modal Microscope for Biomedical Research",
"abstract": "Microscopists often use different instruments to exploit the different types of contrast available in complicated biological media. A new multi-modal microscope makes it possible to acquire the different images nearly simultaneously, without moving the specimen. Rather than simply observing different contrast mechanisms in a specimen which has been moved, the user can now obtain registered images in different modes for image fusion or data fusion. The first application area is imaging of embryo health",
"abstracts": [
{
"abstractType": "Regular",
"content": "Microscopists often use different instruments to exploit the different types of contrast available in complicated biological media. A new multi-modal microscope makes it possible to acquire the different images nearly simultaneously, without moving the specimen. Rather than simply observing different contrast mechanisms in a specimen which has been moved, the user can now obtain registered images in different modes for image fusion or data fusion. The first application area is imaging of embryo health",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Microscopists often use different instruments to exploit the different types of contrast available in complicated biological media. A new multi-modal microscope makes it possible to acquire the different images nearly simultaneously, without moving the specimen. Rather than simply observing different contrast mechanisms in a specimen which has been moved, the user can now obtain registered images in different modes for image fusion or data fusion. The first application area is imaging of embryo health",
"fno": "04015834",
"keywords": [
"Optical Quadrature Microscopy",
"Multimodal Microscope",
"Biomedical Research",
"Image Registration",
"Image Fusion",
"Data Fusion",
"Embryo Health",
"Differential Interference Contrast Microscopy",
"Epifluorescence",
"Laser Scanning Confocal Microscopy",
"Reflectance Confocal Microscopy",
"Two Photon Laser Scanning Microscopy"
],
"authors": [
{
"affiliation": "Center for Subsurface Sensing&Imaging Syst., Northeastern Univ., Boston, MA",
"fullName": "G. Laevsky",
"givenName": "G.",
"surname": "Laevsky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "W.C. Warger",
"givenName": "W.C.",
"surname": "Warger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "M. Rajadhyaksha",
"givenName": "M.",
"surname": "Rajadhyaksha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "C.A. DiMarzio",
"givenName": "C.A.",
"surname": "DiMarzio",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "lssa",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-2",
"year": "2006",
"issn": null,
"isbn": "1-4244-0277-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04015833",
"articleId": "12OmNzV70II",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04015835",
"articleId": "12OmNrHSD42",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1994/6952/1/00413302",
"title": "Multi-sensor image fusion",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413302/12OmNANkoeu",
"parentPublication": {
"id": "proceedings/icip/1994/6952/1",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2009/3656/0/3656a360",
"title": "Multi-Modal Registration of Embryonic Images for In Vitro Fertilization",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2009/3656a360/12OmNAle6z8",
"parentPublication": {
"id": "proceedings/bibe/2009/3656/0",
"title": "2009 Ninth IEEE International Conference on Bioinformatics and Bioengineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2000/0862/0/08620307",
"title": "Reconstructing Specimens Using DIC Microscope Images",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2000/08620307/12OmNCdBDNt",
"parentPublication": {
"id": "proceedings/bibe/2000/0862/0",
"title": "13th IEEE International Conference on BioInformatics and BioEngineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999830",
"title": "A New Method for Prediction of Corrosion Processes in Metallization Systems for Substrates and Electrical Contacts",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999830/12OmNwFid28",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2006/0310/0/04120318",
"title": "Automation of Challenging Spatial-Temporal Biomedical Observations with the Adaptive Scanning Optical Microscope (ASOM)",
"doi": null,
"abstractUrl": "/proceedings-article/case/2006/04120318/12OmNwvDQwk",
"parentPublication": {
"id": "proceedings/case/2006/0310/0",
"title": "2006 IEEE International Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2015/8221/0/8221a033",
"title": "A Novel Multi-focus Image Fusion Method Research",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2015/8221a033/12OmNx2QUHU",
"parentPublication": {
"id": "proceedings/icinis/2015/8221/0",
"title": "2015 8th International Conference on Intelligent Networks and Intelligent Systems (ICINIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dicta/2011/4588/0/4588a319",
"title": "An Evaluation of Multi-resolution Microscope Slide Scanning Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2011/4588a319/12OmNyRxFJK",
"parentPublication": {
"id": "proceedings/dicta/2011/4588/0",
"title": "2011 International Conference on Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/01/v0093",
"title": "Thin Structure Segmentation and Visualization in Three-Dimensional Biomedical Images: A Shape-Based Approach",
"doi": null,
"abstractUrl": "/journal/tg/2006/01/v0093/13rRUILtJqK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061489",
"title": "An interactive visualization tool for multi-channel confocal microscopy data in neurobiology research",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061489/13rRUNvgz4b",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h073",
"title": "Multi-Modal Fusion Transformer for End-to-End Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h073/1yeJa4WXSAE",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNz2kqri",
"title": "2005 IEEE International Symposium on Signal Processing and Information Technology",
"acronym": "isspit",
"groupId": "1001026",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzvz6Jt",
"doi": "10.1109/ISSPIT.2005.1577197",
"title": "Solid model approximation for successive three-dimensional shape processing",
"normalizedTitle": "Solid model approximation for successive three-dimensional shape processing",
"abstract": "This paper proposes a practical, accurate, topologically robust and ranging error resistive shape modeling procedure that approximates a real object, with the matrix-format data structure, for the resulting 3D shape processing. Examples of the shape processing are based on the premise of the virtual manipulation of the 3D shape, such as local shape modification and blending. A geometric model with the desired meshing is directly reconstructed based on a solid modeling approach. The radial distance of each scanning point from the axis of the cylindrical coordinates is measured by laser triangulation. The angular and vertical positions of the laser beam are two other coordinate values of the scanning. A face array listing (topology), which defines the vertex (sampling point) connectivity and the shape of the mesh, is assigned to meet the desired meshing. Topologically stable meshing, and hence, an accurate approximation, free from the shape ambiguity unavoidable in the so-called ICP (iterative closest point) modeling, is then accomplished. This proposal allows not only the versatile and automatic shape reconstruction, but also virtual shape manipulation for various trainings and restorations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a practical, accurate, topologically robust and ranging error resistive shape modeling procedure that approximates a real object, with the matrix-format data structure, for the resulting 3D shape processing. Examples of the shape processing are based on the premise of the virtual manipulation of the 3D shape, such as local shape modification and blending. A geometric model with the desired meshing is directly reconstructed based on a solid modeling approach. The radial distance of each scanning point from the axis of the cylindrical coordinates is measured by laser triangulation. The angular and vertical positions of the laser beam are two other coordinate values of the scanning. A face array listing (topology), which defines the vertex (sampling point) connectivity and the shape of the mesh, is assigned to meet the desired meshing. Topologically stable meshing, and hence, an accurate approximation, free from the shape ambiguity unavoidable in the so-called ICP (iterative closest point) modeling, is then accomplished. This proposal allows not only the versatile and automatic shape reconstruction, but also virtual shape manipulation for various trainings and restorations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a practical, accurate, topologically robust and ranging error resistive shape modeling procedure that approximates a real object, with the matrix-format data structure, for the resulting 3D shape processing. Examples of the shape processing are based on the premise of the virtual manipulation of the 3D shape, such as local shape modification and blending. A geometric model with the desired meshing is directly reconstructed based on a solid modeling approach. The radial distance of each scanning point from the axis of the cylindrical coordinates is measured by laser triangulation. The angular and vertical positions of the laser beam are two other coordinate values of the scanning. A face array listing (topology), which defines the vertex (sampling point) connectivity and the shape of the mesh, is assigned to meet the desired meshing. Topologically stable meshing, and hence, an accurate approximation, free from the shape ambiguity unavoidable in the so-called ICP (iterative closest point) modeling, is then accomplished. This proposal allows not only the versatile and automatic shape reconstruction, but also virtual shape manipulation for various trainings and restorations.",
"fno": "01577197",
"keywords": [
"Automatic Shape Reconstruction",
"Successive 3 D Shape Processing",
"Solid Model Approximation",
"Matrix Format Data Structure",
"Local Shape Modification",
"Local Shape Blending",
"Laser Triangulation",
"Face Array Listing",
"Iterative Closest Point",
"Virtual Shape Manipulation"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci., Chiba Inst. of Technol., Narashino, Japan",
"fullName": "Junta Doi",
"givenName": null,
"surname": "Junta Doi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Chiba Inst. of Technol., Narashino, Japan",
"fullName": "W. Sato",
"givenName": "W.",
"surname": "Sato",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isspit",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-12-01T00:00:00",
"pubType": "proceedings",
"pages": "778-783",
"year": "2005",
"issn": null,
"isbn": "0-7803-9313-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01577196",
"articleId": "12OmNzIUfJa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01577198",
"articleId": "12OmNzEVS1q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/visapp/2014/8133/3/07295136",
"title": "Three-dimensional visual reconstruction of path shape using a cart with a laser scanner",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295136/12OmNAlvHyp",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/3",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00340997",
"title": "Three-dimensional shape reconstruction by active rangefinder",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00340997/12OmNAo45On",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/1/81831393",
"title": "Local three-dimensional shape-preserving smoothing without shrinkage",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831393/12OmNCfjeEl",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420834",
"title": "Adaptive shape evolution using blending",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420834/12OmNCmpcNR",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031f231",
"title": "Finite Element Analysis and Shape Optimization on Gear of Tooth Modification",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031f231/12OmNrAMF2G",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223192",
"title": "Local shape approximation from shading",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223192/12OmNxWLTjh",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2009/3605/1/3605a607",
"title": "Energy-Based Shape Modification of NURBS Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2009/3605a607/12OmNyen1pG",
"parentPublication": {
"id": "proceedings/cso/2009/3605/1",
"title": "2009 International Joint Conference on Computational Sciences and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2005/02/mcg2005020036",
"title": "Bounded Blending for Function-Based Shape Modeling",
"doi": null,
"abstractUrl": "/magazine/cg/2005/02/mcg2005020036/13rRUwjoNCb",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1998/11/i1186",
"title": "Shape Evolution With Structural and Topological Changes Using Blending",
"doi": null,
"abstractUrl": "/journal/tp/1998/11/i1186/13rRUxASuBn",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2012/06/06109276",
"title": "Medial Spheres for Shape Approximation",
"doi": null,
"abstractUrl": "/journal/tp/2012/06/06109276/13rRUxOdD9s",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx8Ounz",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"acronym": "haptics",
"groupId": "1000312",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBtl1sT",
"doi": "10.1109/HAPTIC.2010.5444635",
"title": "Effects of haptic guidance and disturbance on motor learning: Potential advantage of haptic disturbance",
"normalizedTitle": "Effects of haptic guidance and disturbance on motor learning: Potential advantage of haptic disturbance",
"abstract": "One of the primary goals of haptic guidance is to facilitate the learning of complex human motor skills by providing haptic cues that are helpful to induce desired movements. Nevertheless, a majority of previous studies have found that haptic guidance is ineffective, or sometimes even detrimental, to motor skill learning. In this paper, we propose the opposite concept, haptic disturbance, and evaluate its efficacy. In haptic disturbance, haptic cues that interfere with the movements of a learner are presented during training. We designed two methods of haptic disturbance using repulsive and noise-like forces, respectively. The effects of these methods were experimentally assessed, comparatively with the conventional methods of visual learning only and progressive haptic guidance. The motor task was to track a dot moving on a 2D plane with a haptic interface operated with one arm. We found that during training, the progressive haptic guidance showed the best tracking accuracy, but in immediate and delayed retention tests, the noise-like haptic disturbance led to the best performance. The results suggest high potentials for haptic disturbance to be a general strategy for expediting the motor learning process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One of the primary goals of haptic guidance is to facilitate the learning of complex human motor skills by providing haptic cues that are helpful to induce desired movements. Nevertheless, a majority of previous studies have found that haptic guidance is ineffective, or sometimes even detrimental, to motor skill learning. In this paper, we propose the opposite concept, haptic disturbance, and evaluate its efficacy. In haptic disturbance, haptic cues that interfere with the movements of a learner are presented during training. We designed two methods of haptic disturbance using repulsive and noise-like forces, respectively. The effects of these methods were experimentally assessed, comparatively with the conventional methods of visual learning only and progressive haptic guidance. The motor task was to track a dot moving on a 2D plane with a haptic interface operated with one arm. We found that during training, the progressive haptic guidance showed the best tracking accuracy, but in immediate and delayed retention tests, the noise-like haptic disturbance led to the best performance. The results suggest high potentials for haptic disturbance to be a general strategy for expediting the motor learning process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One of the primary goals of haptic guidance is to facilitate the learning of complex human motor skills by providing haptic cues that are helpful to induce desired movements. Nevertheless, a majority of previous studies have found that haptic guidance is ineffective, or sometimes even detrimental, to motor skill learning. In this paper, we propose the opposite concept, haptic disturbance, and evaluate its efficacy. In haptic disturbance, haptic cues that interfere with the movements of a learner are presented during training. We designed two methods of haptic disturbance using repulsive and noise-like forces, respectively. The effects of these methods were experimentally assessed, comparatively with the conventional methods of visual learning only and progressive haptic guidance. The motor task was to track a dot moving on a 2D plane with a haptic interface operated with one arm. We found that during training, the progressive haptic guidance showed the best tracking accuracy, but in immediate and delayed retention tests, the noise-like haptic disturbance led to the best performance. The results suggest high potentials for haptic disturbance to be a general strategy for expediting the motor learning process.",
"fno": "05444635",
"keywords": [
"Computer Based Training",
"Graphical User Interfaces",
"Haptic Interfaces",
"Human Factors",
"Haptic Guidance",
"Haptic Disturbance",
"Human Motor Skill Learning",
"Haptic Cues",
"Repulsive Force",
"Noise Like Force",
"Haptic Interfaces",
"Feedback",
"Testing",
"Virtual Reality",
"Humans",
"Computer Graphics",
"Computational Modeling",
"Computer Simulation",
"Laboratories",
"Computer Science",
"H 1 2 Information Systems User Machines Systems Human Information Processing",
"H 5 2 Information Interfaces And Presentation User Interfaces Haptic I O",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality"
],
"authors": [
{
"affiliation": "Haptics and Virtual Reality Laboratory, Department of Computer Science and Technology, Pohang University of Science and Technology (POSTECH), Republic of Korea",
"fullName": "Jaebong Lee",
"givenName": "Jaebong",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Haptics and Virtual Reality Laboratory, Department of Computer Science and Technology, Pohang University of Science and Technology (POSTECH), Republic of Korea",
"fullName": "Seungmoon Choi",
"givenName": null,
"surname": "Seungmoon Choi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "haptics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-03-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2010",
"issn": "2324-7347",
"isbn": "978-1-4244-6821-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05444634",
"articleId": "12OmNwCsdKu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05444632",
"articleId": "12OmNx5GU9T",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2010/6821/0/05444633",
"title": "A vibrotactile feedback approach to posture guidance",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444633/12OmNAo45Ki",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2009/3858/0/04810914",
"title": "Visual versus haptic progressive guidance for training in a virtual dynamic task",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2009/04810914/12OmNApcuo3",
"parentPublication": {
"id": "proceedings/whc/2009/3858/0",
"title": "World Haptics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479984",
"title": "Haptic Guidance Benefits Musical Motor Learning",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479984/12OmNButq3Q",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptic/2006/0226/0/01627128",
"title": "Haptic Attributes and Human Motor Skills",
"doi": null,
"abstractUrl": "/proceedings-article/haptic/2006/01627128/12OmNvpNIvU",
"parentPublication": {
"id": "proceedings/haptic/2006/0226/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cerma/2009/3799/0/3799a175",
"title": "Haptic Guidance Based on Sub-optimal Passivity Control",
"doi": null,
"abstractUrl": "/proceedings-article/cerma/2009/3799a175/12OmNwwMf2P",
"parentPublication": {
"id": "proceedings/cerma/2009/3799/0",
"title": "Electronics, Robotics and Automotive Mechanics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444632",
"title": "Progressive haptic and visual guidance for training in a virtual dynamic task",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444632/12OmNx5GU9T",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2002/1489/0/14890040",
"title": "Haptic Guidance: Experimental Evaluation of a Haptic Training Method for a Perceptual Motor Skill",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2002/14890040/12OmNy68EJI",
"parentPublication": {
"id": "proceedings/haptics/2002/1489/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/02/07378499",
"title": "Movement Strategy Discovery during Training via Haptic Guidance",
"doi": null,
"abstractUrl": "/journal/th/2016/02/07378499/13rRUxBa5xt",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/03/tth2012030208",
"title": "The Task-Dependent Efficacy of Shared-Control Haptic Guidance Paradigms",
"doi": null,
"abstractUrl": "/journal/th/2012/03/tth2012030208/13rRUxNEqQ4",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/02/06967807",
"title": "The Effect of Haptic Guidance on Learning a Hybrid Rhythmic-Discrete Motor Task",
"doi": null,
"abstractUrl": "/journal/th/2015/02/06967807/13rRUxcKzVq",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNApcuag",
"title": "IEEE Haptics Symposium 2008",
"acronym": "haptics",
"groupId": "1000312",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNButq3Q",
"doi": "10.1109/HAPTICS.2008.4479984",
"title": "Haptic Guidance Benefits Musical Motor Learning",
"normalizedTitle": "Haptic Guidance Benefits Musical Motor Learning",
"abstract": "This paper presents the results of a pilot experiment looking at the effect of haptic guidance on musical training. A percussion performance task was used where subjects learned to play short rhythmic sequences on a device capable of recording drumstick movements with a high degree of spatiotemporal accuracy. Subjects learned to perform the sequences under three primary training paradigms: listening to the rhythm (audio), being guided through the motions involved in the rhythm's performance (haptic), and being guided through the required motions while listening to the resulting sound (audio+haptic). Performance was assessed in terms of both timing and loudness (velocity) accuracy using several different metrics. 	Results indicate that haptic guidance can significantly benefit recall of both note timing and velocity. When subject performance was compared in terms of note velocity recall, the addition of haptic guidance to audio-based training produced a 17% reduction in final error when compared to audio training alone. When performance was evaluated in terms of liming recall, the combination of audio and haptic guidance led to an 18% reduction in early-stage error.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents the results of a pilot experiment looking at the effect of haptic guidance on musical training. A percussion performance task was used where subjects learned to play short rhythmic sequences on a device capable of recording drumstick movements with a high degree of spatiotemporal accuracy. Subjects learned to perform the sequences under three primary training paradigms: listening to the rhythm (audio), being guided through the motions involved in the rhythm's performance (haptic), and being guided through the required motions while listening to the resulting sound (audio+haptic). Performance was assessed in terms of both timing and loudness (velocity) accuracy using several different metrics. 	Results indicate that haptic guidance can significantly benefit recall of both note timing and velocity. When subject performance was compared in terms of note velocity recall, the addition of haptic guidance to audio-based training produced a 17% reduction in final error when compared to audio training alone. When performance was evaluated in terms of liming recall, the combination of audio and haptic guidance led to an 18% reduction in early-stage error.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents the results of a pilot experiment looking at the effect of haptic guidance on musical training. A percussion performance task was used where subjects learned to play short rhythmic sequences on a device capable of recording drumstick movements with a high degree of spatiotemporal accuracy. Subjects learned to perform the sequences under three primary training paradigms: listening to the rhythm (audio), being guided through the motions involved in the rhythm's performance (haptic), and being guided through the required motions while listening to the resulting sound (audio+haptic). Performance was assessed in terms of both timing and loudness (velocity) accuracy using several different metrics. \tResults indicate that haptic guidance can significantly benefit recall of both note timing and velocity. When subject performance was compared in terms of note velocity recall, the addition of haptic guidance to audio-based training produced a 17% reduction in final error when compared to audio training alone. When performance was evaluated in terms of liming recall, the combination of audio and haptic guidance led to an 18% reduction in early-stage error.",
"fno": "04479984",
"keywords": [
"Audio User Interfaces",
"Computer Based Training",
"Haptic Interfaces",
"Music",
"Haptic Guidance",
"Musical Motor Learning",
"Drumstick Movement Recording",
"Audio Based Training",
"Haptic Interfaces",
"Timing",
"Rhythm",
"Computer Errors",
"Feedback",
"System Testing",
"Optimal Control",
"Laboratories",
"Audio Recording",
"Spatiotemporal Phenomena",
"H 5 2 Information Interfaces And Presentation User Interfaces Haptic I O",
"K 3 0 Computers And Education General"
],
"authors": [
{
"affiliation": "Media Laboratory, Massachusetts Institute of Technology, e-mail: grindlay@mit.edu",
"fullName": "Graham Grindlay",
"givenName": "Graham",
"surname": "Grindlay",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "haptics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-03-01T00:00:00",
"pubType": "proceedings",
"pages": "397-404",
"year": "2008",
"issn": "2324-7347",
"isbn": "978-1-4244-2005-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04479968",
"articleId": "12OmNykCcfG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04479969",
"articleId": "12OmNA14Adq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2010/6821/0/05444635",
"title": "Effects of haptic guidance and disturbance on motor learning: Potential advantage of haptic disturbance",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444635/12OmNBtl1sT",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/04145145",
"title": "Haptic Feedback Enhances Force Skill Learning",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145145/12OmNrNh0Ci",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836465",
"title": "Visual Guidance for Encountered Type Haptic Display: A feasibility study",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836465/12OmNwkzutD",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cerma/2009/3799/0/3799a175",
"title": "Haptic Guidance Based on Sub-optimal Passivity Control",
"doi": null,
"abstractUrl": "/proceedings-article/cerma/2009/3799a175/12OmNwwMf2P",
"parentPublication": {
"id": "proceedings/cerma/2009/3799/0",
"title": "Electronics, Robotics and Automotive Mechanics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444632",
"title": "Progressive haptic and visual guidance for training in a virtual dynamic task",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444632/12OmNx5GU9T",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2002/1489/0/14890040",
"title": "Haptic Guidance: Experimental Evaluation of a Haptic Training Method for a Perceptual Motor Skill",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2002/14890040/12OmNy68EJI",
"parentPublication": {
"id": "proceedings/haptics/2002/1489/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/02/07378499",
"title": "Movement Strategy Discovery during Training via Haptic Guidance",
"doi": null,
"abstractUrl": "/journal/th/2016/02/07378499/13rRUxBa5xt",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/03/tth2012030208",
"title": "The Task-Dependent Efficacy of Shared-Control Haptic Guidance Paradigms",
"doi": null,
"abstractUrl": "/journal/th/2012/03/tth2012030208/13rRUxNEqQ4",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/02/06967807",
"title": "The Effect of Haptic Guidance on Learning a Hybrid Rhythmic-Discrete Motor Task",
"doi": null,
"abstractUrl": "/journal/th/2015/02/06967807/13rRUxcKzVq",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/02/07047839",
"title": "Haptic Shared Control in Tele-Manipulation: Effects of Inaccuracies in Guidance on Task Execution",
"doi": null,
"abstractUrl": "/journal/th/2015/02/07047839/13rRUxd2aZd",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwwMf3H",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismarw",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwkzutD",
"doi": "10.1109/ISMAR-Adjunct.2016.0044",
"title": "Visual Guidance for Encountered Type Haptic Display: A feasibility study",
"normalizedTitle": "Visual Guidance for Encountered Type Haptic Display: A feasibility study",
"abstract": "Virtual/mixed reality leveraging an encountered type haptic display will suffer difficulty if virtual and real objects are spatially discrepant. We propose a new method for resolving this issue, visual guidance. The visual guidance algorithm is defined and described in detail, and contrasted with a previously explored approach. The feasibility of the proposed algorithm is experimentally verified.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual/mixed reality leveraging an encountered type haptic display will suffer difficulty if virtual and real objects are spatially discrepant. We propose a new method for resolving this issue, visual guidance. The visual guidance algorithm is defined and described in detail, and contrasted with a previously explored approach. The feasibility of the proposed algorithm is experimentally verified.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual/mixed reality leveraging an encountered type haptic display will suffer difficulty if virtual and real objects are spatially discrepant. We propose a new method for resolving this issue, visual guidance. The visual guidance algorithm is defined and described in detail, and contrasted with a previously explored approach. The feasibility of the proposed algorithm is experimentally verified.",
"fno": "07836465",
"keywords": [
"Computer Displays",
"Haptic Interfaces",
"Virtual Reality",
"Visual Guidance Algorithm",
"Encountered Type Haptic Display",
"Feasibility Study",
"Virtual Reality",
"Mixed Reality",
"Haptic Interfaces",
"Visualization",
"Training",
"Robots",
"Error Analysis",
"Safety",
"Presses",
"Haptic Augmented Virtuality",
"Spatial Discrepancy",
"Encountered Type Haptic Display",
"Visual Guidance"
],
"authors": [
{
"affiliation": null,
"fullName": "Chang-Gyu Lee",
"givenName": "Chang-Gyu",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gregory Lynn Dunn",
"givenName": "Gregory Lynn",
"surname": "Dunn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ian Oakley",
"givenName": "Ian",
"surname": "Oakley",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jeha Ryu",
"givenName": "Jeha",
"surname": "Ryu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismarw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-09-01T00:00:00",
"pubType": "proceedings",
"pages": "74-77",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3740-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07836464",
"articleId": "12OmNAo45F9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07836466",
"articleId": "12OmNx7G5Tm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2010/6821/0/05444635",
"title": "Effects of haptic guidance and disturbance on motor learning: Potential advantage of haptic disturbance",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444635/12OmNBtl1sT",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479984",
"title": "Haptic Guidance Benefits Musical Motor Learning",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479984/12OmNButq3Q",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444632",
"title": "Progressive haptic and visual guidance for training in a virtual dynamic task",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444632/12OmNx5GU9T",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892375",
"title": "Experiencing guidance in 3D spaces with a vibrotactile head-mounted display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892375/12OmNy5hRo2",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2015/8146/0/07340503",
"title": "A Haptic-Assisted Guidance System for working machines based on virtual force fields",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2015/07340503/12OmNy5zskG",
"parentPublication": {
"id": "proceedings/icat/2015/8146/0",
"title": "2015 XXV International Conference on Information, Communication and Automation Technologies (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/02/07378499",
"title": "Movement Strategy Discovery during Training via Haptic Guidance",
"doi": null,
"abstractUrl": "/journal/th/2016/02/07378499/13rRUxBa5xt",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/03/tth2012030208",
"title": "The Task-Dependent Efficacy of Shared-Control Haptic Guidance Paradigms",
"doi": null,
"abstractUrl": "/journal/th/2012/03/tth2012030208/13rRUxNEqQ4",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/02/06967807",
"title": "The Effect of Haptic Guidance on Learning a Hybrid Rhythmic-Discrete Motor Task",
"doi": null,
"abstractUrl": "/journal/th/2015/02/06967807/13rRUxcKzVq",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/02/07047839",
"title": "Haptic Shared Control in Tele-Manipulation: Effects of Inaccuracies in Guidance on Task Execution",
"doi": null,
"abstractUrl": "/journal/th/2015/02/07047839/13rRUxd2aZd",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a382",
"title": "FrictionHaptics : Encountered-Type Haptic Device forTangential Friction Emulation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a382/1gysjOSOaeA",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyjLoQJ",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"acronym": "haptics",
"groupId": "1000312",
"volume": "0",
"displayVolume": "0",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy68EJI",
"doi": "10.1109/HAPTIC.2002.998939",
"title": "Haptic Guidance: Experimental Evaluation of a Haptic Training Method for a Perceptual Motor Skill",
"normalizedTitle": "Haptic Guidance: Experimental Evaluation of a Haptic Training Method for a Perceptual Motor Skill",
"abstract": "In this paper we investigate a use of haptics for skills training which we call haptic guidance. In the haptic guidance paradigm, the subject is physically guided through the ideal motion by the haptic interface, thus giving the subject a kinesthetic understanding of what is required. Subjects learned a complex 3-D motion under three training conditions (haptic, visual, haptic and visual) and were required to manually reproduce the movement under two recall conditions (with vision, without vision). Performance was measured in terms of position, shape, timing, and drift. Findings from this study indicate that haptic guidance is effective in training. While visual training was better for teaching the trajectory shape, temporal aspects of the task were more effectively learned from haptic guidance. This supports a possible role for haptics in the training of perceptual motor skills in virtual environments.}",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we investigate a use of haptics for skills training which we call haptic guidance. In the haptic guidance paradigm, the subject is physically guided through the ideal motion by the haptic interface, thus giving the subject a kinesthetic understanding of what is required. Subjects learned a complex 3-D motion under three training conditions (haptic, visual, haptic and visual) and were required to manually reproduce the movement under two recall conditions (with vision, without vision). Performance was measured in terms of position, shape, timing, and drift. Findings from this study indicate that haptic guidance is effective in training. While visual training was better for teaching the trajectory shape, temporal aspects of the task were more effectively learned from haptic guidance. This supports a possible role for haptics in the training of perceptual motor skills in virtual environments.}",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we investigate a use of haptics for skills training which we call haptic guidance. In the haptic guidance paradigm, the subject is physically guided through the ideal motion by the haptic interface, thus giving the subject a kinesthetic understanding of what is required. Subjects learned a complex 3-D motion under three training conditions (haptic, visual, haptic and visual) and were required to manually reproduce the movement under two recall conditions (with vision, without vision). Performance was measured in terms of position, shape, timing, and drift. Findings from this study indicate that haptic guidance is effective in training. While visual training was better for teaching the trajectory shape, temporal aspects of the task were more effectively learned from haptic guidance. This supports a possible role for haptics in the training of perceptual motor skills in virtual environments.}",
"fno": "14890040",
"keywords": [
"Haptics",
"Training",
"Guidance",
"Virtual Environments",
"Motor Learning"
],
"authors": [
{
"affiliation": "University of California at Berkeley",
"fullName": "David Feygin",
"givenName": "David",
"surname": "Feygin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California at San Francisco",
"fullName": "Madeleine Keehner",
"givenName": "Madeleine",
"surname": "Keehner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California at San Francisco and University of California at Berkeley",
"fullName": "Frank Tendick",
"givenName": "Frank",
"surname": "Tendick",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "haptics",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-03-01T00:00:00",
"pubType": "proceedings",
"pages": "40",
"year": "2002",
"issn": null,
"isbn": "0-7695-1489-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "14890032",
"articleId": "12OmNxy4MYb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "14890051",
"articleId": "12OmNAhOUJ8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqHItAf",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"acronym": "haptics",
"groupId": "1000312",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyp9Mn4",
"doi": "10.1109/VR.2006.26",
"title": "Analysis of Spatially Constrained Reaching Movements in Haptic Environments",
"normalizedTitle": "Analysis of Spatially Constrained Reaching Movements in Haptic Environments",
"abstract": "A distributed system with interchangeable constraints for studying skillful human movements via haptic displays is presented. A unified interface provides easy linking of various physical models with 2D and 3D static spatial constraints, and the graphical contents related to the models as well. The motion analysis is based on the data recorded by system?s History Unit with a frequency of 100Hz. Theoretical and experimental kinematic profiles are compared for several cases of basic reaching rest-to-rest tasks: curve-constrained motions with different curvatures, flexible object control, and cooperative twoarm movements. The experimental patterns exhibit the best agreement with the optimal control models based on jerk and force-change minimization criteria.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A distributed system with interchangeable constraints for studying skillful human movements via haptic displays is presented. A unified interface provides easy linking of various physical models with 2D and 3D static spatial constraints, and the graphical contents related to the models as well. The motion analysis is based on the data recorded by system?s History Unit with a frequency of 100Hz. Theoretical and experimental kinematic profiles are compared for several cases of basic reaching rest-to-rest tasks: curve-constrained motions with different curvatures, flexible object control, and cooperative twoarm movements. The experimental patterns exhibit the best agreement with the optimal control models based on jerk and force-change minimization criteria.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A distributed system with interchangeable constraints for studying skillful human movements via haptic displays is presented. A unified interface provides easy linking of various physical models with 2D and 3D static spatial constraints, and the graphical contents related to the models as well. The motion analysis is based on the data recorded by system?s History Unit with a frequency of 100Hz. Theoretical and experimental kinematic profiles are compared for several cases of basic reaching rest-to-rest tasks: curve-constrained motions with different curvatures, flexible object control, and cooperative twoarm movements. The experimental patterns exhibit the best agreement with the optimal control models based on jerk and force-change minimization criteria.",
"fno": "02260062",
"keywords": [
"Haptic I O",
"Control Theory",
"Kinematics And Dynamics"
],
"authors": [
{
"affiliation": "3D Incorporated,Japan",
"fullName": "Igor Goncharenko",
"givenName": "Igor",
"surname": "Goncharenko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "3D Incorporated,Japan",
"fullName": "Yutaka Kanou",
"givenName": "Yutaka",
"surname": "Kanou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Bio-Mimetic Control Research Center,RIKEN, Japan",
"fullName": "Mikhail Svinin",
"givenName": "Mikhail",
"surname": "Svinin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Bio-Mimetic Control Research Center,RIKEN, Japan",
"fullName": "Shigeyuki Hosoe",
"givenName": "Shigeyuki",
"surname": "Hosoe",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "haptics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-03-01T00:00:00",
"pubType": "proceedings",
"pages": "62",
"year": "2006",
"issn": null,
"isbn": "1-4244-0226-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "02260061",
"articleId": "12OmNxaNGhx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "02260063",
"articleId": "12OmNqBtiYw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2006/0226/0/02260033",
"title": "Standardized Evaluation of Haptic Rendering Systems",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260033/12OmNAObbDP",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479936",
"title": "Haptic Solutions and Bio-Mimetically Inspired Motion Planning Strategy for Rolling-Based Locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479936/12OmNqHItus",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260058",
"title": "Performance Enhancement of a Haptic Arm Exoskeleton",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260058/12OmNscxj93",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/27380033",
"title": "On the Influence of Arm Inertia and Configuration on Motion Planning of Reaching Movements in Haptic Environments",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/27380033/12OmNwGIcxm",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260023",
"title": "Adaptation of Haptic Interfaces for a LabVIEW-based System Dynamics Course",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260023/12OmNxXl5BV",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptic/2006/0226/0/01627107",
"title": "Analysis of Spatially Constrained Reaching Movements in Haptic Environments",
"doi": null,
"abstractUrl": "/proceedings-article/haptic/2006/01627107/12OmNzZ5ofS",
"parentPublication": {
"id": "proceedings/haptic/2006/0226/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/01/v0103",
"title": "Predictive Haptic Guidance: Intelligent User Assistance for the Control of Dynamic Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2006/01/v0103/13rRUwcS1CJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/02/07378499",
"title": "Movement Strategy Discovery during Training via Haptic Guidance",
"doi": null,
"abstractUrl": "/journal/th/2016/02/07378499/13rRUxBa5xt",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/06/v0661",
"title": "Six Degree-of-Freedom Haptic Rendering Using Spatialized Normal Cone Search",
"doi": null,
"abstractUrl": "/journal/tg/2005/06/v0661/13rRUyp7tWN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxA3Z4D",
"title": "World Haptics Conference",
"acronym": "whc",
"groupId": "1001635",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzd7bWL",
"doi": "10.1109/WHC.2009.4810805",
"title": "Performance improvement with haptic assistance: A quantitative assessment",
"normalizedTitle": "Performance improvement with haptic assistance: A quantitative assessment",
"abstract": "We measure the performance improvement that force feedback can provide in a virtual environment, through three experiments with, and without the assistance of haptic guidance. Performance measurements were undertaken with haptic, visual and auditory feedback alternatives. The first task investigated the use of haptic guidance mimicking reality, in the form of a simulated touchable surface of an object. The second investigated haptic guidance which waxed and waned as the user violated program rules by varying amounts. The third experiment investigated whether this latter artificial guidance would inhibit the user's free will by taking control out of their hands. The results showed that a significant improvement in both accuracy and speed was achieved by the introduction of haptics in all experiments. It also found that the haptic guidance did not take control away from the user and that they had significantly more control than with conventional warning methods. These experiments were not aimed at learning, or retention of skill, but on using haptics as an aid to improve performance during a task.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We measure the performance improvement that force feedback can provide in a virtual environment, through three experiments with, and without the assistance of haptic guidance. Performance measurements were undertaken with haptic, visual and auditory feedback alternatives. The first task investigated the use of haptic guidance mimicking reality, in the form of a simulated touchable surface of an object. The second investigated haptic guidance which waxed and waned as the user violated program rules by varying amounts. The third experiment investigated whether this latter artificial guidance would inhibit the user's free will by taking control out of their hands. The results showed that a significant improvement in both accuracy and speed was achieved by the introduction of haptics in all experiments. It also found that the haptic guidance did not take control away from the user and that they had significantly more control than with conventional warning methods. These experiments were not aimed at learning, or retention of skill, but on using haptics as an aid to improve performance during a task.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We measure the performance improvement that force feedback can provide in a virtual environment, through three experiments with, and without the assistance of haptic guidance. Performance measurements were undertaken with haptic, visual and auditory feedback alternatives. The first task investigated the use of haptic guidance mimicking reality, in the form of a simulated touchable surface of an object. The second investigated haptic guidance which waxed and waned as the user violated program rules by varying amounts. The third experiment investigated whether this latter artificial guidance would inhibit the user's free will by taking control out of their hands. The results showed that a significant improvement in both accuracy and speed was achieved by the introduction of haptics in all experiments. It also found that the haptic guidance did not take control away from the user and that they had significantly more control than with conventional warning methods. These experiments were not aimed at learning, or retention of skill, but on using haptics as an aid to improve performance during a task.",
"fno": "04810805",
"keywords": [],
"authors": [
{
"affiliation": "Commonwealth Scientific Industrial, Research Organisation (CSIRO), Australia",
"fullName": "Chris Gunn",
"givenName": "Chris",
"surname": "Gunn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Commonwealth Scientific Industrial, Research Organisation (CSIRO), Australia",
"fullName": "Warren Muller",
"givenName": "Warren",
"surname": "Muller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Science and Software Engineering, University of Western Australia, Australia",
"fullName": "Amitava Datta",
"givenName": "Amitava",
"surname": "Datta",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "whc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-03-01T00:00:00",
"pubType": "proceedings",
"pages": "511-516",
"year": "2009",
"issn": null,
"isbn": "978-1-4244-3858-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04810804",
"articleId": "12OmNzxyiO3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04810806",
"articleId": "12OmNwDSdDO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccae/2009/3569/0/3569a160",
"title": "Haptic Device Application in Persian Calligraphy",
"doi": null,
"abstractUrl": "/proceedings-article/iccae/2009/3569a160/12OmNBkP3wM",
"parentPublication": {
"id": "proceedings/iccae/2009/3569/0",
"title": "2009 International Conference on Computer and Automation Engineering. ICCAE 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2005/2310/0/23100452",
"title": "Motor Skill Training Assistance Using Haptic Attributes",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2005/23100452/12OmNvlxJoQ",
"parentPublication": {
"id": "proceedings/whc/2005/2310/0",
"title": "Proceedings. First Joint Eurohaptics Conference and Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems. World Haptics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2002/1489/0/14890040",
"title": "Haptic Guidance: Experimental Evaluation of a Haptic Training Method for a Perceptual Motor Skill",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2002/14890040/12OmNy68EJI",
"parentPublication": {
"id": "proceedings/haptics/2002/1489/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vsmm/2001/1402/0/14020844",
"title": "Investigation of Haptic Framework for Quantitative Design Analysis in a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vsmm/2001/14020844/12OmNyrIark",
"parentPublication": {
"id": "proceedings/vsmm/2001/1402/0",
"title": "Proceedings Seventh International Conference on Virtual Systems and MultiMedia Enhanced Realities: Augmented and Unplugged",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/1999/0234/0/02340035",
"title": "Development of a Haptic Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/1999/02340035/12OmNzYeB2C",
"parentPublication": {
"id": "proceedings/cbms/1999/0234/0",
"title": "Proceedings 12th IEEE Symposium on Computer-Based Medical Systems (Cat. No.99CB36365)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2000/0478/0/04780241",
"title": "The Haptic Interfaces of the Next Decade",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2000/04780241/12OmNzmclE3",
"parentPublication": {
"id": "proceedings/vr/2000/0478/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2014/02/mmu2014020011",
"title": "Toward Haptic Cinematography: Enhancing Movie Experiences with Camera-Based Haptic Effects",
"doi": null,
"abstractUrl": "/magazine/mu/2014/02/mmu2014020011/13rRUILtJnT",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/02/07378499",
"title": "Movement Strategy Discovery during Training via Haptic Guidance",
"doi": null,
"abstractUrl": "/journal/th/2016/02/07378499/13rRUxBa5xt",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/03/06783759",
"title": "A Survey on Bimanual Haptic Interaction",
"doi": null,
"abstractUrl": "/journal/th/2014/03/06783759/13rRUxYIMVe",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797956",
"title": "Haptic Compass: Active Vibrotactile Feedback of Physical Object for Path Guidance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797956/1cJ17BLEK88",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNviZlGi",
"title": "High Performance Computing and Grid in Asia Pacific Region, International Conference on",
"acronym": "hpcasia",
"groupId": "1000321",
"volume": "0",
"displayVolume": "0",
"year": "1997",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBAIAPV",
"doi": "10.1109/HPC.1997.592218",
"title": "Solution of viscoelastic scattering problems in linear acoustics using hp boundary/finite element method",
"normalizedTitle": "Solution of viscoelastic scattering problems in linear acoustics using hp boundary/finite element method",
"abstract": "The interaction of acoustic waves with submerged structures remains one of the most difficult and challenging problems in underwater acoustics. Many techniques such as coupled Boundary Element (BE)/Finite Element (FE) or coupled Infinite Element (IE)/Finite Element approximations have evolved. In this paper, we focus on the steady-state formulation only, and study a general coupled hp-adaptive BE/FE method. The work is devoted to a study on viscoelastic scattering problems in linear acoustic medium by means of hp boundary and finite element approximations. A particular emphasis is placed on an a-posteriori error estimation for the viscoelastic scattering problems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The interaction of acoustic waves with submerged structures remains one of the most difficult and challenging problems in underwater acoustics. Many techniques such as coupled Boundary Element (BE)/Finite Element (FE) or coupled Infinite Element (IE)/Finite Element approximations have evolved. In this paper, we focus on the steady-state formulation only, and study a general coupled hp-adaptive BE/FE method. The work is devoted to a study on viscoelastic scattering problems in linear acoustic medium by means of hp boundary and finite element approximations. A particular emphasis is placed on an a-posteriori error estimation for the viscoelastic scattering problems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The interaction of acoustic waves with submerged structures remains one of the most difficult and challenging problems in underwater acoustics. Many techniques such as coupled Boundary Element (BE)/Finite Element (FE) or coupled Infinite Element (IE)/Finite Element approximations have evolved. In this paper, we focus on the steady-state formulation only, and study a general coupled hp-adaptive BE/FE method. The work is devoted to a study on viscoelastic scattering problems in linear acoustic medium by means of hp boundary and finite element approximations. A particular emphasis is placed on an a-posteriori error estimation for the viscoelastic scattering problems.",
"fno": "79010611",
"keywords": [
"Underwater Sound Viscoelastic Scattering Problems Linear Acoustics Hp Boundary Element Method Finite Element Method Acoustic Waves Submerged Structures Underwater Acoustics Steady State Formulation Linear Acoustic Medium A Posteriori Error Estimation"
],
"authors": [
{
"affiliation": "Nat. Centre for High-Performance Comput., Hsinchu, Taiwan",
"fullName": "Yao-Chang Chang",
"givenName": "Yao-Chang",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nat. Centre for High-Performance Comput., Hsinchu, Taiwan",
"fullName": "L. Demkowicz",
"givenName": "L.",
"surname": "Demkowicz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hpcasia",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1997-04-01T00:00:00",
"pubType": "proceedings",
"pages": "611",
"year": "1997",
"issn": null,
"isbn": "0-8186-7901-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "79010605",
"articleId": "12OmNx7ouX6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "79010617",
"articleId": "12OmNy3AgvV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzy7uNY",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"acronym": "icmtma",
"groupId": "1002837",
"volume": "3",
"displayVolume": "3",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx76THU",
"doi": "10.1109/ICMTMA.2010.584",
"title": "The High Accuracy Simulation of Resistivity LWD Electric Field Using Self-Adaptive hp-FEM",
"normalizedTitle": "The High Accuracy Simulation of Resistivity LWD Electric Field Using Self-Adaptive hp-FEM",
"abstract": "Nowadays, the worldwide petroleum industry has significantly progressed, resistivity LWD has become the key technique of complex oil reservoir development and been used widely. The accurate simulation of electric field plays an important part in the LWD instrument measurement data analysis and explains. It also has considerable merit in the design of high-efficient, cost-effective resistivity LWD instruments. The simulation of electric field performed with a self-adaptive hp-finite element method which inherits the advantage of FEM and essential to solve problem which has complex geometry and boundary conditions. This method delivers exponential convergence rates. Automatic adaptive allows for accurate approximations of the quantity of interest without the need to obtain an accurate solution in the entire computational domain. This method becomes essential to simulate LWD measurements; since it reduces the computational cost by several orders of magnitude with respect to hp-FEM. Numerical results illustrate the efficiency and high accuracy of the method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Nowadays, the worldwide petroleum industry has significantly progressed, resistivity LWD has become the key technique of complex oil reservoir development and been used widely. The accurate simulation of electric field plays an important part in the LWD instrument measurement data analysis and explains. It also has considerable merit in the design of high-efficient, cost-effective resistivity LWD instruments. The simulation of electric field performed with a self-adaptive hp-finite element method which inherits the advantage of FEM and essential to solve problem which has complex geometry and boundary conditions. This method delivers exponential convergence rates. Automatic adaptive allows for accurate approximations of the quantity of interest without the need to obtain an accurate solution in the entire computational domain. This method becomes essential to simulate LWD measurements; since it reduces the computational cost by several orders of magnitude with respect to hp-FEM. Numerical results illustrate the efficiency and high accuracy of the method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Nowadays, the worldwide petroleum industry has significantly progressed, resistivity LWD has become the key technique of complex oil reservoir development and been used widely. The accurate simulation of electric field plays an important part in the LWD instrument measurement data analysis and explains. It also has considerable merit in the design of high-efficient, cost-effective resistivity LWD instruments. The simulation of electric field performed with a self-adaptive hp-finite element method which inherits the advantage of FEM and essential to solve problem which has complex geometry and boundary conditions. This method delivers exponential convergence rates. Automatic adaptive allows for accurate approximations of the quantity of interest without the need to obtain an accurate solution in the entire computational domain. This method becomes essential to simulate LWD measurements; since it reduces the computational cost by several orders of magnitude with respect to hp-FEM. Numerical results illustrate the efficiency and high accuracy of the method.",
"fno": "3962e874",
"keywords": [
"Resistivity LWD",
"Simulation Of Electric Field",
"Self Adaptive",
"Hp FEM",
"Maxwells Equation"
],
"authors": [
{
"affiliation": null,
"fullName": "Xiaohui Chen",
"givenName": "Xiaohui",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dejun Liu",
"givenName": "Dejun",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhonghua Ma",
"givenName": "Zhonghua",
"surname": "Ma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmtma",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-03-01T00:00:00",
"pubType": "proceedings",
"pages": "874-877",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-3962-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3962e870",
"articleId": "12OmNzBwGws",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3962e878",
"articleId": "12OmNzDvSom",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/nswctc/2009/3610/2/3610b627",
"title": "Research and Design of Intelligent Wireless Electric Power Parameter Detection Algorithm Combining FFT with Wavelet Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/nswctc/2009/3610b627/12OmNB9t6lf",
"parentPublication": {
"id": "proceedings/nswctc/2009/3610/2",
"title": "Networks Security, Wireless Communications and Trusted Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdciem/2012/4639/0/4639a596",
"title": "Research on FEM of Magnetic Field Computation of Rotational Motors",
"doi": null,
"abstractUrl": "/proceedings-article/cdciem/2012/4639a596/12OmNrAv3JQ",
"parentPublication": {
"id": "proceedings/cdciem/2012/4639/0",
"title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iitaw/2008/3505/0/3505a859",
"title": "A Bayesian Learning Model in the Agent-based Bilateral Negotiation between the Coal Producers and Electric Power Generators",
"doi": null,
"abstractUrl": "/proceedings-article/iitaw/2008/3505a859/12OmNvvtGWx",
"parentPublication": {
"id": "proceedings/iitaw/2008/3505/0",
"title": "2008 International Symposium on Intelligent Information Technology Application Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2008/3357/1/3357a855",
"title": "Application of Fuzzy Modeling to Identify the Soil Resistivity of Dynamic Grounding System",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2008/3357a855/12OmNwbLVmT",
"parentPublication": {
"id": "proceedings/icicta/2008/3357/1",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2011/4373/0/4373a458",
"title": "Application of Agent-based Approach for Multiscale hp-adaptive Finite Element Method",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2011/4373a458/12OmNwpXRXy",
"parentPublication": {
"id": "proceedings/cisis/2011/4373/0",
"title": "2011 International Conference on Complex, Intelligent, and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031f130",
"title": "Design of Electric Power Data Acquisition Card Based on PC/104 Bus",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031f130/12OmNxEBzi2",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2010/4270/0/4270a928",
"title": "Research on Battery Identification of Electric Vehicle Battery Management System",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270a928/12OmNxG1yPY",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ncis/2011/4355/1/4355a390",
"title": "Evaluation on the Speed Stability of Electric Wheelchairs Based on Speedadaptation",
"doi": null,
"abstractUrl": "/proceedings-article/ncis/2011/4355a390/12OmNy68EyK",
"parentPublication": {
"id": "proceedings/ncis/2011/4355/1",
"title": "Network Computing and Information Security, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2009/3852/0/3852a066",
"title": "The Implementation of FEM and RBF Neural Network in EIT",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2009/3852a066/12OmNyqiaVG",
"parentPublication": {
"id": "proceedings/icinis/2009/3852/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2020/6406/0/640600c489",
"title": "The Applicability of Sensor Size in Inhomogeneous Electric Field Based on ANSYS",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2020/640600c489/1x3kh6gVGzC",
"parentPublication": {
"id": "proceedings/icisce/2020/6406/0",
"title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzlUKD1",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"acronym": "case",
"groupId": "1001095",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBDQbmn",
"doi": "10.1109/CoASE.2012.6386490",
"title": "Development of semi-automatic painting system for inner hull block structures",
"normalizedTitle": "Development of semi-automatic painting system for inner hull block structures",
"abstract": "Painting works in double hull blocks are very difficult and dangerous because working space is isolated and narrow. The structure in double blocks is too complex to apply automatic painting equipments. For these reasons, every shipyard still applied manual painting process which causes delay in ship construction and low quality. Therefore, the purpose of this study is to develop the semi-automatic painting system that is able to painting inside longitudinal stiffener of double hull blocks. It is divided into two groups. One is the semi-automatic painting machine and the others are the supplement devices. By applying the developed automatic system, productivity and painting quality can be increased and workmen's accident and injuries can be reduced.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Painting works in double hull blocks are very difficult and dangerous because working space is isolated and narrow. The structure in double blocks is too complex to apply automatic painting equipments. For these reasons, every shipyard still applied manual painting process which causes delay in ship construction and low quality. Therefore, the purpose of this study is to develop the semi-automatic painting system that is able to painting inside longitudinal stiffener of double hull blocks. It is divided into two groups. One is the semi-automatic painting machine and the others are the supplement devices. By applying the developed automatic system, productivity and painting quality can be increased and workmen's accident and injuries can be reduced.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Painting works in double hull blocks are very difficult and dangerous because working space is isolated and narrow. The structure in double blocks is too complex to apply automatic painting equipments. For these reasons, every shipyard still applied manual painting process which causes delay in ship construction and low quality. Therefore, the purpose of this study is to develop the semi-automatic painting system that is able to painting inside longitudinal stiffener of double hull blocks. It is divided into two groups. One is the semi-automatic painting machine and the others are the supplement devices. By applying the developed automatic system, productivity and painting quality can be increased and workmen's accident and injuries can be reduced.",
"fno": "06386490",
"keywords": [
"Painting",
"Ships",
"Semiautomatic Painting System",
"Hull Block Structure",
"Automatic Painting Equipments",
"Shipyard",
"Manual Painting Process",
"Supplement Devices",
"Longitudinal Stiffener",
"Double Hull Blocks",
"Painting",
"Films",
"Manuals",
"Sensors",
"Marine Vehicles",
"Paints"
],
"authors": [
{
"affiliation": "Automation R&D Team, Research Institute of Technology, STX Offshore & Shipbuilding Co., Ltd., Korea",
"fullName": "Dong Hoon Lee",
"givenName": "Dong Hoon",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Automation R&D Team, Research Institute of Technology, STX Offshore & Shipbuilding Co., Ltd., Korea",
"fullName": "Ho Kyeong Kim",
"givenName": "Ho Kyeong",
"surname": "Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "case",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-08-01T00:00:00",
"pubType": "proceedings",
"pages": "833-836",
"year": "2012",
"issn": "2161-8070",
"isbn": "978-1-4673-0430-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06386489",
"articleId": "12OmNz4SOzk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06386491",
"articleId": "12OmNA0dMIq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icis/2017/5507/0/07960025",
"title": "A flexible finger-mounted airbrush model for immersive freehand painting",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2017/07960025/12OmNBV9Ikp",
"parentPublication": {
"id": "proceedings/icis/2017/5507/0",
"title": "2017 IEEE/ACIS 16th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480776",
"title": "Cutting, Deforming and Painting of 3D meshes in a Two Handed Viso-haptic VR System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480776/12OmNCwlacX",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2009/4830/0/05255155",
"title": "Non-photorealistic rendering of ink painting style diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2009/05255155/12OmNyFU7aU",
"parentPublication": {
"id": "proceedings/grc/2009/4830/0",
"title": "2009 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/culture-computing/2013/5047/0/5047a139",
"title": "Painting Based Cubic VR Also for CAVE and Spherical Screen Film",
"doi": null,
"abstractUrl": "/proceedings-article/culture-computing/2013/5047a139/12OmNzsrwgQ",
"parentPublication": {
"id": "proceedings/culture-computing/2013/5047/0",
"title": "2013 International Conference on Culture and Computing (Culture Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2007/03/mcg2007030012",
"title": "From Abstract Painting to Information Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2007/03/mcg2007030012/13rRUIJcWnp",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050723",
"title": "Painting with Polygons: A Procedural Watercolor Engine",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050723/13rRUxBa5bY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07042343",
"title": "A Modular Framework for Digital Painting",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07042343/13rRUxDIthe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/03/v0266",
"title": "Efficient Example-Based Painting and Synthesis of 2D Directional Texture",
"doi": null,
"abstractUrl": "/journal/tg/2004/03/v0266/13rRUxcbnH1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798200",
"title": "Panoramic Fluid Painting",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798200/1cJ0VsoPxfO",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i432",
"title": "Painting Many Pasts: Synthesizing Time Lapse Videos of Paintings",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i432/1m3nu7jSK6Q",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAndii9",
"title": "Biometrics and Kansei Engineering, International Conference on",
"acronym": "icbake",
"groupId": "1002851",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwCaCvC",
"doi": "10.1109/ICBAKE.2011.13",
"title": "Visual Complexity Perception and Texture Image Characteristics",
"normalizedTitle": "Visual Complexity Perception and Texture Image Characteristics",
"abstract": "Visual complexity perception is an important issue in the fields of psychology and computer vision because it leads to the better understanding of the nature of human perception as well as the properties of the objects being perceived. In this study, five important characteristics of texture images that affect visual complexity perception are identified: regularity, understandability, roughness, directionality, and density. Among these, understandability is a deterministic characteristic, which reflects the viewer's prior knowledge and experience. These characteristics significantly affect the visual complexity perception of texture images. In order to achieve our objective, we carried out two experiments involving visual complexity assessment and paired comparison evaluation with 30 respondents. We applied correlation analysis, factor analysis, and multidimensional scaling to analyze the collected data. The experimental results showed that most of the human impressions of visual complexity can be explained by the perceived characteristics of texture images.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual complexity perception is an important issue in the fields of psychology and computer vision because it leads to the better understanding of the nature of human perception as well as the properties of the objects being perceived. In this study, five important characteristics of texture images that affect visual complexity perception are identified: regularity, understandability, roughness, directionality, and density. Among these, understandability is a deterministic characteristic, which reflects the viewer's prior knowledge and experience. These characteristics significantly affect the visual complexity perception of texture images. In order to achieve our objective, we carried out two experiments involving visual complexity assessment and paired comparison evaluation with 30 respondents. We applied correlation analysis, factor analysis, and multidimensional scaling to analyze the collected data. The experimental results showed that most of the human impressions of visual complexity can be explained by the perceived characteristics of texture images.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual complexity perception is an important issue in the fields of psychology and computer vision because it leads to the better understanding of the nature of human perception as well as the properties of the objects being perceived. In this study, five important characteristics of texture images that affect visual complexity perception are identified: regularity, understandability, roughness, directionality, and density. Among these, understandability is a deterministic characteristic, which reflects the viewer's prior knowledge and experience. These characteristics significantly affect the visual complexity perception of texture images. In order to achieve our objective, we carried out two experiments involving visual complexity assessment and paired comparison evaluation with 30 respondents. We applied correlation analysis, factor analysis, and multidimensional scaling to analyze the collected data. The experimental results showed that most of the human impressions of visual complexity can be explained by the perceived characteristics of texture images.",
"fno": "4512a260",
"keywords": [
"Visual Complexity",
"Kansei",
"Texture Perception",
"Multidimensional Scaling"
],
"authors": [
{
"affiliation": null,
"fullName": "Xiaoying Guo",
"givenName": "Xiaoying",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chie Muraki Asano",
"givenName": "Chie Muraki",
"surname": "Asano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Akira Asano",
"givenName": "Akira",
"surname": "Asano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Takio Kurita",
"givenName": "Takio",
"surname": "Kurita",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icbake",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-09-01T00:00:00",
"pubType": "proceedings",
"pages": "260-265",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4512-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4512a256",
"articleId": "12OmNzyp5Zi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4512a266",
"articleId": "12OmNypIYFR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iscid/2009/3865/2/3865b352",
"title": "Research of the Affective Responses to Product's Texture Based on the Kansei Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/iscid/2009/3865b352/12OmNCdk2J4",
"parentPublication": {
"id": "proceedings/iscid/2009/3865/2",
"title": "Computational Intelligence and Design, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/citworkshops/2008/3242/0/3242a374",
"title": "A New Visual Attention Model Using Texture and Object Features",
"doi": null,
"abstractUrl": "/proceedings-article/citworkshops/2008/3242a374/12OmNrAMENj",
"parentPublication": {
"id": "proceedings/citworkshops/2008/3242/0",
"title": "Computer and Information Technology, IEEE 8th International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2011/4517/0/4517a352",
"title": "Low-Complexity Encoding Method for H.264/AVC Based on Visual Perception",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2011/4517a352/12OmNwwuE22",
"parentPublication": {
"id": "proceedings/iih-msp/2011/4517/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apcip/2009/3699/2/3699b055",
"title": "Texture Image Classification Using Perceptual Texture Features and Gabor Wavelet Features",
"doi": null,
"abstractUrl": "/proceedings-article/apcip/2009/3699b055/12OmNx38vOb",
"parentPublication": {
"id": "proceedings/apcip/2009/3699/1",
"title": "Information Processing, Asia-Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apcip/2009/3699/2/3699b051",
"title": "Combining Salient Points and Visual Perception Texture Features for Image Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/apcip/2009/3699b051/12OmNz4SOsy",
"parentPublication": {
"id": "proceedings/apcip/2009/3699/1",
"title": "Information Processing, Asia-Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2007/2909/1/290910369",
"title": "New Texture Features Based on Wavelet Transform Coinciding with Human Visual Perception",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2007/290910369/12OmNzd7bTy",
"parentPublication": {
"id": "proceedings/snpd/2007/2909/1",
"title": "Eighth ACIS International Conference on Software Engineering, Artificial Intelligence, Networking, and Parallel/Distributed Computing (SNPD 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/3/73103648",
"title": "Pyramid-based texture analysis/synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73103648/12OmNznkKfg",
"parentPublication": {
"id": "proceedings/icip/1995/7310/3",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2003/04/v0512",
"title": "Texture Synthesis for 3D Shape Representation",
"doi": null,
"abstractUrl": "/journal/tg/2003/04/v0512/13rRUEgarsB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/04/07115121",
"title": "Experimental Study on the Perception Characteristics of Haptic Texture by Multidimensional Scaling",
"doi": null,
"abstractUrl": "/journal/th/2015/04/07115121/13rRUILtJr6",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/assp/2021/9883/0/988300a005",
"title": "Realistic Image-to-Image Translation with Enhanced Texture",
"doi": null,
"abstractUrl": "/proceedings-article/assp/2021/988300a005/1CakWAk3imY",
"parentPublication": {
"id": "proceedings/assp/2021/9883/0",
"title": "2021 2nd Asia Symposium on Signal Processing (ASSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrMHOd6",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"acronym": "hicss",
"groupId": "1000730",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxj23gX",
"doi": "10.1109/HICSS.2016.449",
"title": "Visual Complexity and Figure-Background Color Contrast of E-Commerce Websites: Effects on Consumers' Emotional Responses",
"normalizedTitle": "Visual Complexity and Figure-Background Color Contrast of E-Commerce Websites: Effects on Consumers' Emotional Responses",
"abstract": "This paper examines the effects of visual complexity and figure-background color contrast of e-commerce websites on consumers' emotional responses (i.e., pleasantness, arousal, and dominance) which is considered as the direct antecedents to online shopping behaviors. Data collection was carried out in a laboratory experiment, which was a 4 (visual complexity) x 4 (figure-background color contrast) between-subjects factorial design. Based on questionnaire responses from 324 subjects, the results showed that visual complexity and figure-background color contrast of website had partially significant effects on consumers' emotional responses. We believed that a well-designed website with adequate visual complexity and figure-background color contrast can create a desired environment to attract and retain consumers. The findings of this study provide important implications for theory and practice.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper examines the effects of visual complexity and figure-background color contrast of e-commerce websites on consumers' emotional responses (i.e., pleasantness, arousal, and dominance) which is considered as the direct antecedents to online shopping behaviors. Data collection was carried out in a laboratory experiment, which was a 4 (visual complexity) x 4 (figure-background color contrast) between-subjects factorial design. Based on questionnaire responses from 324 subjects, the results showed that visual complexity and figure-background color contrast of website had partially significant effects on consumers' emotional responses. We believed that a well-designed website with adequate visual complexity and figure-background color contrast can create a desired environment to attract and retain consumers. The findings of this study provide important implications for theory and practice.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper examines the effects of visual complexity and figure-background color contrast of e-commerce websites on consumers' emotional responses (i.e., pleasantness, arousal, and dominance) which is considered as the direct antecedents to online shopping behaviors. Data collection was carried out in a laboratory experiment, which was a 4 (visual complexity) x 4 (figure-background color contrast) between-subjects factorial design. Based on questionnaire responses from 324 subjects, the results showed that visual complexity and figure-background color contrast of website had partially significant effects on consumers' emotional responses. We believed that a well-designed website with adequate visual complexity and figure-background color contrast can create a desired environment to attract and retain consumers. The findings of this study provide important implications for theory and practice.",
"fno": "5670d594",
"keywords": [
"Visualization",
"Color",
"Complexity Theory",
"Image Color Analysis",
"Human Computer Interaction",
"Context",
"Guidelines"
],
"authors": [
{
"affiliation": null,
"fullName": "Sheng-Wei Lin",
"givenName": "Sheng-Wei",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Louis Yi-Shih Lo",
"givenName": "Louis Yi-Shih",
"surname": "Lo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Travis K. Huang",
"givenName": "Travis K.",
"surname": "Huang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2016-01-01T00:00:00",
"pubType": "proceedings",
"pages": "3594-3603",
"year": "2016",
"issn": "1530-1605",
"isbn": "978-0-7695-5670-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5670d584",
"articleId": "12OmNASraUO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5670d604",
"articleId": "12OmNz61duh",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2004/2128/1/212810067",
"title": "Iterative Figure-Ground Discrimination",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2004/212810067/12OmNAq3hCO",
"parentPublication": {
"id": "proceedings/icpr/2004/2128/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460290",
"title": "Example-based contrast enhancement for portrait photograph",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460290/12OmNqC2v3G",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2016/8985/0/8985a825",
"title": "Eating with Eyes: Assessing the Importance of the Visual Perception of Consumers",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2016/8985a825/12OmNqNXEta",
"parentPublication": {
"id": "proceedings/iiai-aai/2016/8985/0",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2012/1662/0/06215215",
"title": "Contrast preserving decolorization",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2012/06215215/12OmNrJiCLp",
"parentPublication": {
"id": "proceedings/iccp/2012/1662/0",
"title": "2012 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270202",
"title": "Tracking as Repeated Figure/Ground Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270202/12OmNvjyxyw",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460668",
"title": "Video figure ground labeling",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460668/12OmNwCaCwi",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/06909916",
"title": "Sequential Convex Relaxation for Mutual Information-Based Unsupervised Figure-Ground Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/06909916/12OmNwF0BNA",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948426",
"title": "SmartColor: Real-time color correction and contrast for optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948426/12OmNzaQoFo",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/12/07138644",
"title": "SmartColor: Real-Time Color and Contrast Correction for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/12/07138644/13rRUwfZC0k",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzSh1bn",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"acronym": "cgiv",
"groupId": "1001775",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzvhvBh",
"doi": "10.1109/CGIV.2009.66",
"title": "The Impacts of Animated-Virtual Actors' Visual Complexity and Simulator Sickness in Virtual Reality Applications",
"normalizedTitle": "The Impacts of Animated-Virtual Actors' Visual Complexity and Simulator Sickness in Virtual Reality Applications",
"abstract": "This article discusses the effects of Animated Virtual Actors (AVAs) visual complexity on Simulator Sickness (SS) in Virtual Reality (VR) applications. SS is one of the major disadvantages of VR simulations. Previous research has shown that visual complexity correlates with SS. Yet complex AVAs are increasingly used along with real-time graphics. Minimising SS for a VR application is thus beneficial. A series of VR simulations were created to teach second-year psychology students about the navigational capabilities of desert ants with different levels of AVA's visual complexity: flat, cartoon, or life- like. We predicted that more complex AVAs would induce more SS. The results contradicted the predictions, with no significant differences in SS between groups as a function of the AVAs visual complexity. Moreover, our methods succeeded in low overall levels of SS in all the simulations. Possible explanations and our future research directions are discussed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This article discusses the effects of Animated Virtual Actors (AVAs) visual complexity on Simulator Sickness (SS) in Virtual Reality (VR) applications. SS is one of the major disadvantages of VR simulations. Previous research has shown that visual complexity correlates with SS. Yet complex AVAs are increasingly used along with real-time graphics. Minimising SS for a VR application is thus beneficial. A series of VR simulations were created to teach second-year psychology students about the navigational capabilities of desert ants with different levels of AVA's visual complexity: flat, cartoon, or life- like. We predicted that more complex AVAs would induce more SS. The results contradicted the predictions, with no significant differences in SS between groups as a function of the AVAs visual complexity. Moreover, our methods succeeded in low overall levels of SS in all the simulations. Possible explanations and our future research directions are discussed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This article discusses the effects of Animated Virtual Actors (AVAs) visual complexity on Simulator Sickness (SS) in Virtual Reality (VR) applications. SS is one of the major disadvantages of VR simulations. Previous research has shown that visual complexity correlates with SS. Yet complex AVAs are increasingly used along with real-time graphics. Minimising SS for a VR application is thus beneficial. A series of VR simulations were created to teach second-year psychology students about the navigational capabilities of desert ants with different levels of AVA's visual complexity: flat, cartoon, or life- like. We predicted that more complex AVAs would induce more SS. The results contradicted the predictions, with no significant differences in SS between groups as a function of the AVAs visual complexity. Moreover, our methods succeeded in low overall levels of SS in all the simulations. Possible explanations and our future research directions are discussed.",
"fno": "3789a147",
"keywords": [
"Computer Aided Instruction",
"Computer Animation",
"Human Factors",
"Psychology",
"Virtual Reality",
"Animated Virtual Actor",
"Visual Complexity",
"Simulator Sickness",
"Virtual Reality",
"Real Time Graphics",
"Psychology Students",
"Desert Ant Navigational Capabilities",
"Animation",
"Virtual Reality",
"Brain Modeling",
"Computational Modeling",
"Visualization",
"Computer Simulation",
"Graphics",
"Psychology",
"Navigation",
"Layout",
"Virtual Actors",
"Virtual Reality",
"Visual Complexity",
"Simulation Sickness",
"Visualisation"
],
"authors": [
{
"affiliation": "Dept. of Comput., Macquarie Univ. Sydney, Sydney, NSW, Australia",
"fullName": "Iwan Kartiko",
"givenName": "Iwan",
"surname": "Kartiko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput., Macquarie Univ. Sydney, Sydney, NSW, Australia",
"fullName": "Manolya Kavakli",
"givenName": "Manolya",
"surname": "Kavakli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Brain, Behaviour & Evolution, Macquarie Univ. Sydney, Sydney, NSW, Australia",
"fullName": "Ken Cheng",
"givenName": "Ken",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cgiv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-08-01T00:00:00",
"pubType": "proceedings",
"pages": "147-152",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3789-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3789a044",
"articleId": "12OmNxEBz8F",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3789a050",
"articleId": "12OmNx3q6Xm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/searis/2012/1249/0/06231171",
"title": "Configurable semi-autonomic animated animal characters in interactive virtual reality applications",
"doi": null,
"abstractUrl": "/proceedings-article/searis/2012/06231171/12OmNvnOww6",
"parentPublication": {
"id": "proceedings/searis/2012/1249/0",
"title": "2012 5th Workshop on Software Engineering and Architectures for Realtime Interactive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrais/1993/4910/0/00378256",
"title": "What you see is what you hear-Acoustics applied in virtual worlds",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1993/00378256/12OmNyrqzAY",
"parentPublication": {
"id": "proceedings/vrais/1993/4910/0",
"title": "IEEE 1993 Symposium on Research Frontiers in Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446346",
"title": "Reducing VR Sickness Through Peripheral Visual Effects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446346/13bd1fHrlRY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404606",
"title": "Feasibility of Training Athletes for High-Pressure Situations Using Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404606/13rRUwghd99",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2018/7123/0/08493426",
"title": "How Real Can Virtual Become?",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2018/08493426/14tNJnWdtTy",
"parentPublication": {
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a542",
"title": "Resolution Tradeoff in Gameplay Experience, Performance, and Simulator Sickness in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a542/1CJcAVYrJew",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049694",
"title": "Effect of Frame Rate on User Experience, Performance, and Simulator Sickness in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049694/1KYopPcDKk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805431",
"title": "Common Fate for Animated Transitions in Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805431/1cG4F76usA8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798158",
"title": "PhantomLegs: Reducing Virtual Reality Sickness Using Head-Worn Haptic Devices",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798158/1cJ16zT3GdW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a247",
"title": "Real-Time Detection of Simulator Sickness in Virtual Reality Games Based on Players' Psychophysiological Data during Gameplay",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a247/1pBMj6Ryu9q",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1hJrHq07uw0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"acronym": "big-data",
"groupId": "1802964",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hJrK80FiNy",
"doi": "10.1109/BigData47090.2019.9005498",
"title": "Study of the Effects of Visual Complexity and Consumer Experience on Visual Attention and Purchase Behavior through the Use of Eye Tracking",
"normalizedTitle": "Study of the Effects of Visual Complexity and Consumer Experience on Visual Attention and Purchase Behavior through the Use of Eye Tracking",
"abstract": "The purpose of this study is to clarify the effects of visual complexity and consumer's experience on visual attention and purchase behavior. Data on visual attention collected by using eye tracking has been applied to investigate the effects of advertisement design and consumer's attributes. However, it is difficult to analyze the effects of various factors on visual attention and purchase behavior because they have complicated relations. This study uses visual complexity as a factor of visual stimuli affecting visual attention and purchase behavior. Moreover, consumer experience is applied as an attribute of consumers. Experience is understood to be a factor that strongly impacts purchase behavior. This study applies path analysis for clarifying the relationships among these factors. Eye tracking data collected on catalog mail order shopping was used to clarify the relationship among visual complexity, consumer experience, visual attention, and purchase behavior.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The purpose of this study is to clarify the effects of visual complexity and consumer's experience on visual attention and purchase behavior. Data on visual attention collected by using eye tracking has been applied to investigate the effects of advertisement design and consumer's attributes. However, it is difficult to analyze the effects of various factors on visual attention and purchase behavior because they have complicated relations. This study uses visual complexity as a factor of visual stimuli affecting visual attention and purchase behavior. Moreover, consumer experience is applied as an attribute of consumers. Experience is understood to be a factor that strongly impacts purchase behavior. This study applies path analysis for clarifying the relationships among these factors. Eye tracking data collected on catalog mail order shopping was used to clarify the relationship among visual complexity, consumer experience, visual attention, and purchase behavior.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The purpose of this study is to clarify the effects of visual complexity and consumer's experience on visual attention and purchase behavior. Data on visual attention collected by using eye tracking has been applied to investigate the effects of advertisement design and consumer's attributes. However, it is difficult to analyze the effects of various factors on visual attention and purchase behavior because they have complicated relations. This study uses visual complexity as a factor of visual stimuli affecting visual attention and purchase behavior. Moreover, consumer experience is applied as an attribute of consumers. Experience is understood to be a factor that strongly impacts purchase behavior. This study applies path analysis for clarifying the relationships among these factors. Eye tracking data collected on catalog mail order shopping was used to clarify the relationship among visual complexity, consumer experience, visual attention, and purchase behavior.",
"fno": "09005498",
"keywords": [
"Consumer Behaviour",
"Marketing Data Processing",
"Object Tracking",
"Purchasing",
"Visual Perception",
"Purchase Behavior",
"Visual Complexity",
"Consumer Experience",
"Visual Attention",
"Eye Tracking",
"Visual Stimuli",
"Visualization",
"Complexity Theory",
"Gaze Tracking",
"Layout",
"Task Analysis",
"Atmospheric Measurements",
"Eye Tracking",
"Advertisement",
"Visual Complexity",
"Consumer Experience",
"Path Analysis"
],
"authors": [
{
"affiliation": "University of Hyogo,School of Social Information Scienceline,Kobe,Japan",
"fullName": "Ken Ishibashi",
"givenName": "Ken",
"surname": "Ishibashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kansai University,Graduate School of Business and Commerce,Suita,Japan",
"fullName": "Chen Xiao",
"givenName": "Chen",
"surname": "Xiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kansai University,Faculty of Business and Commerce,Suita,Japan",
"fullName": "Katsutoshi Yada",
"givenName": "Katsutoshi",
"surname": "Yada",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "big-data",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-12-01T00:00:00",
"pubType": "proceedings",
"pages": "2664-2673",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-0858-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09006317",
"articleId": "1hJrSuagR3i",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09005996",
"articleId": "1hJrJxlzxaU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wmso/2008/3484/0/3484a115",
"title": "Research on Purchase Intentions of Residential Product Based on Structural Equation Model",
"doi": null,
"abstractUrl": "/proceedings-article/wmso/2008/3484a115/12OmNAlvHrh",
"parentPublication": {
"id": "proceedings/wmso/2008/3484/0",
"title": "Modelling, Simulation and Optimization, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2015/7343/0/07314288",
"title": "Challenges and Perspectives in Big Eye-Movement Data Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2015/07314288/12OmNButq1p",
"parentPublication": {
"id": "proceedings/bdva/2015/7343/0",
"title": "2015 Big Data Visual Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2015/7568/0/7568a107",
"title": "Visual Analysis of Eye Movements by Hierarchical Filter Wheels",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a107/12OmNC2xhyv",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icee/2010/3997/0/3997c491",
"title": "Research on e-Commerce Environment in Decision-making Model of Consumer Purchase",
"doi": null,
"abstractUrl": "/proceedings-article/icee/2010/3997c491/12OmNwMXnou",
"parentPublication": {
"id": "proceedings/icee/2010/3997/0",
"title": "International Conference on E-Business and E-Government",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2014/4443/0/06847400",
"title": "Factors Effecting Purchase Intention Using Coverpage Image on Cosmetic E-Commerce Website: A Case Study of Thai Female Customers",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2014/06847400/12OmNy6Zs1s",
"parentPublication": {
"id": "proceedings/icisa/2014/4443/0",
"title": "2014 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscmi/2015/9819/0/9819a126",
"title": "Predicting Consumer's Behavior Using Eye Tracking Data",
"doi": null,
"abstractUrl": "/proceedings-article/iscmi/2015/9819a126/12OmNzt0IGz",
"parentPublication": {
"id": "proceedings/iscmi/2015/9819/0",
"title": "2015 Second International Conference on Soft Computing and Machine Intelligence (ISCMI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2019/1307/0/130700a365",
"title": "Simulation of Consumer Purchase Behavior Based on Computational Experiments",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2019/130700a365/18Av3YHeHhS",
"parentPublication": {
"id": "proceedings/icitbs/2019/1307/0",
"title": "2019 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2022/1647/0/09767470",
"title": "Lessons Learned from an Eye Tracking Study for Targeted Advertising in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2022/09767470/1Df864IUvew",
"parentPublication": {
"id": "proceedings/percom-workshops/2022/1647/0",
"title": "2022 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798327",
"title": "Eye-gaze-triggered Visual Cues to Restore Attention in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798327/1cJ0HmmdfUY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798030",
"title": "VR-HMD Eye Tracker in Active Visual Field Testing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798030/1cJ1dsOkvw4",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1uiluGq0Oo8",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1uilU3kbJ5e",
"doi": "10.1109/ICME51207.2021.9428109",
"title": "A Jensen-Shannon Divergence Driven Metric of Visual Scanning Efficiency Indicates Performance of Virtual Driving",
"normalizedTitle": "A Jensen-Shannon Divergence Driven Metric of Visual Scanning Efficiency Indicates Performance of Virtual Driving",
"abstract": "Visual scanning plays an important role in sampling visual information from the surrounding environments for a lot of everyday sensorimotor tasks, such as driving. In this paper, we consider the problem of visual scanning mechanism underpinning sensorimotor tasks in 3D dynamic environments. We exploit the use of eye tracking data as a behaviometric, for indicating the visuo-motor behavioral measure in the context of virtual driving. A new metric of visual scanning efficiency (VSE), which is defined as a mathematical divergence between a fixation distribution and a distribution of optical flows induced by fixations, is proposed by making use of a widely-known information theoretic tool, namely the square root of Jensen-Shannon divergence. Psychophysical eye tracking studies, in virtual reality based driving, are conducted to reveal that the new metric of visual scanning efficiency can be employed very well as a proxy evaluation for driving performance. These results suggest that the exploitation of eye tracking data provides an effective behaviometric for sensorimotor activities.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual scanning plays an important role in sampling visual information from the surrounding environments for a lot of everyday sensorimotor tasks, such as driving. In this paper, we consider the problem of visual scanning mechanism underpinning sensorimotor tasks in 3D dynamic environments. We exploit the use of eye tracking data as a behaviometric, for indicating the visuo-motor behavioral measure in the context of virtual driving. A new metric of visual scanning efficiency (VSE), which is defined as a mathematical divergence between a fixation distribution and a distribution of optical flows induced by fixations, is proposed by making use of a widely-known information theoretic tool, namely the square root of Jensen-Shannon divergence. Psychophysical eye tracking studies, in virtual reality based driving, are conducted to reveal that the new metric of visual scanning efficiency can be employed very well as a proxy evaluation for driving performance. These results suggest that the exploitation of eye tracking data provides an effective behaviometric for sensorimotor activities.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual scanning plays an important role in sampling visual information from the surrounding environments for a lot of everyday sensorimotor tasks, such as driving. In this paper, we consider the problem of visual scanning mechanism underpinning sensorimotor tasks in 3D dynamic environments. We exploit the use of eye tracking data as a behaviometric, for indicating the visuo-motor behavioral measure in the context of virtual driving. A new metric of visual scanning efficiency (VSE), which is defined as a mathematical divergence between a fixation distribution and a distribution of optical flows induced by fixations, is proposed by making use of a widely-known information theoretic tool, namely the square root of Jensen-Shannon divergence. Psychophysical eye tracking studies, in virtual reality based driving, are conducted to reveal that the new metric of visual scanning efficiency can be employed very well as a proxy evaluation for driving performance. These results suggest that the exploitation of eye tracking data provides an effective behaviometric for sensorimotor activities.",
"fno": "09428109",
"keywords": [
"Gaze Tracking",
"Image Sequences",
"Virtual Reality",
"Visual Perception",
"Virtual Driving",
"Visual Information",
"Sensorimotor Tasks",
"3 D Dynamic Environments",
"Eye Tracking Data",
"Visuo Motor Behavioral Measure",
"Mathematical Divergence",
"Information Theoretic Tool",
"Psychophysical Eye Tracking Studies",
"Virtual Reality Based Driving",
"Driving Performance",
"Jensen Shannon Divergence Driven Metric",
"Visual Scanning Efficiency",
"Behaviometric",
"Measurement",
"Visualization",
"Three Dimensional Displays",
"Conferences",
"Gaze Tracking",
"Virtual Reality",
"Tools",
"Visual Scanning Efficiency",
"Eye Tracking",
"Jensen Shannon Divergence JSD",
"Behaviometric"
],
"authors": [
{
"affiliation": "Tianjin University,College of Intelligence and Computing,Tianjin,China",
"fullName": "Zezhong Lv",
"givenName": "Zezhong",
"surname": "Lv",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin University,College of Intelligence and Computing,Tianjin,China",
"fullName": "Qing Xu",
"givenName": "Qing",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Alpen-Adria Universitat Klagenfurt,Klagenfurt,Austria",
"fullName": "Klaus Schoeffmann",
"givenName": "Klaus",
"surname": "Schoeffmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Huddersfield,Huddersfield,UK",
"fullName": "Simon Parkinson",
"givenName": "Simon",
"surname": "Parkinson",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3864-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09428080",
"articleId": "1uilUxuuH8k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09428467",
"articleId": "1uilO1EGYNy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bdva/2015/7343/0/07314288",
"title": "Challenges and Perspectives in Big Eye-Movement Data Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2015/07314288/12OmNButq1p",
"parentPublication": {
"id": "proceedings/bdva/2015/7343/0",
"title": "2015 Big Data Visual Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/02/v0155",
"title": "Scanning Scene Tunnel for City Traversing",
"doi": null,
"abstractUrl": "/journal/tg/2006/02/v0155/13rRUwkfAZ6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a310",
"title": "My Eyes Hurt: Effects of Jitter in 3D Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a310/1CJdbzCNHUc",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2022/9755/0/975500a170",
"title": "Development and evaluation of car training system using VR and eye tracking technology",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2022/975500a170/1GU75yVJubS",
"parentPublication": {
"id": "proceedings/iiai-aai/2022/9755/0",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873986",
"title": "Weighted Pointer: Error-aware Gaze-based Interaction through Fallback Modalities",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873986/1GjwNuaj2ms",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956312",
"title": "A Joint Cascaded Framework for Simultaneous Eye State, Eye Center, and Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956312/1IHq8em8jug",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci*cc/2022/9084/0/10101549",
"title": "Reproduction of mental states in driving using a visual filter",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2022/10101549/1MwECrPOVqg",
"parentPublication": {
"id": "proceedings/icci*cc/2022/9084/0",
"title": "2022 IEEE 21st International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798327",
"title": "Eye-gaze-triggered Visual Cues to Restore Attention in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798327/1cJ0HmmdfUY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a397",
"title": "Eye Tracking Data Collection Protocol for VR for Remotely Located Subjects using Blockchain and Smart Contracts",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a397/1qpzAHsmwlW",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a554",
"title": "Estimating Gaze From Head and Hand Pose and Scene Images for Open-Ended Exploration in VR Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a554/1tnY5akLwvS",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvmowTo",
"title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)",
"acronym": "iciev",
"groupId": "1802578",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAndiyd",
"doi": "10.1109/ICIEV.2014.7135996",
"title": "Variations of white matter tract length in regions of emotional regulation as assessed in alzheimer diffusion tensor images",
"normalizedTitle": "Variations of white matter tract length in regions of emotional regulation as assessed in alzheimer diffusion tensor images",
"abstract": "Alzheimer's disease (AD) is a progressive neurodegenerative disorder which affects various regions of brain. The primary damage occurs in white matter (WM) tracts leading to disintegration and death of neurons. Diffusion tensor imaging is one of the most widely used techniques to study the microstructural changes in WM. In this work, variations in length of WM tracts for regions associated with emotional regulation are studied. The images obtained from ADNI database are pre-processed to remove non-brain tissues and non-rigid registration is performed. Deterministic streamline tractography is used to reconstruct the WM tracts for whole brain. Length of each tract is estimated by voxel counting method. Brodmann area maps are superimposed to extract the tracts for areas 11, 38 and 47. Average tract length of each subregion is compared for AD and normal controls. The results show that there is a reduction in average tract length for AD subjects in the three regions studied. Maximum reduction of 27.15% is observed for area 11 followed by 24.43% for area 38 and 16.31% for area 47. No correlation is observed with MMSE score for any of these three sub-regions. This method of quantification of WM degeneration can be used to assess the severity of AD progression.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Alzheimer's disease (AD) is a progressive neurodegenerative disorder which affects various regions of brain. The primary damage occurs in white matter (WM) tracts leading to disintegration and death of neurons. Diffusion tensor imaging is one of the most widely used techniques to study the microstructural changes in WM. In this work, variations in length of WM tracts for regions associated with emotional regulation are studied. The images obtained from ADNI database are pre-processed to remove non-brain tissues and non-rigid registration is performed. Deterministic streamline tractography is used to reconstruct the WM tracts for whole brain. Length of each tract is estimated by voxel counting method. Brodmann area maps are superimposed to extract the tracts for areas 11, 38 and 47. Average tract length of each subregion is compared for AD and normal controls. The results show that there is a reduction in average tract length for AD subjects in the three regions studied. Maximum reduction of 27.15% is observed for area 11 followed by 24.43% for area 38 and 16.31% for area 47. No correlation is observed with MMSE score for any of these three sub-regions. This method of quantification of WM degeneration can be used to assess the severity of AD progression.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Alzheimer's disease (AD) is a progressive neurodegenerative disorder which affects various regions of brain. The primary damage occurs in white matter (WM) tracts leading to disintegration and death of neurons. Diffusion tensor imaging is one of the most widely used techniques to study the microstructural changes in WM. In this work, variations in length of WM tracts for regions associated with emotional regulation are studied. The images obtained from ADNI database are pre-processed to remove non-brain tissues and non-rigid registration is performed. Deterministic streamline tractography is used to reconstruct the WM tracts for whole brain. Length of each tract is estimated by voxel counting method. Brodmann area maps are superimposed to extract the tracts for areas 11, 38 and 47. Average tract length of each subregion is compared for AD and normal controls. The results show that there is a reduction in average tract length for AD subjects in the three regions studied. Maximum reduction of 27.15% is observed for area 11 followed by 24.43% for area 38 and 16.31% for area 47. No correlation is observed with MMSE score for any of these three sub-regions. This method of quantification of WM degeneration can be used to assess the severity of AD progression.",
"fno": "07135996",
"keywords": [
"Alzheimers Disease",
"Tensile Stress",
"Diffusion Tensor Imaging",
"Streaming Media",
"Image Reconstruction",
"Alzheimers Disease Diffusion Tensor Imaging Deterministic Streamline Tractography Brodmann Area White Matter Tract"
],
"authors": [
{
"affiliation": "Non-Invasive Imaging and Diagnostics Laboratory Biomedical Engineering Group, Department of Applied Mechanics Indian Institute of Technology Madras, India",
"fullName": "Ranjan Piyush",
"givenName": "Ranjan",
"surname": "Piyush",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Non-Invasive Imaging and Diagnostics Laboratory Biomedical Engineering Group, Department of Applied Mechanics Indian Institute of Technology Madras, India",
"fullName": "S. Ramakrishnan",
"givenName": "S.",
"surname": "Ramakrishnan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iciev",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-05-01T00:00:00",
"pubType": "proceedings",
"pages": "1-4",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-5179-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07135995",
"articleId": "12OmNBPtJBv",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07135997",
"articleId": "12OmNyuya07",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdmw/2012/4925/0/4925a086",
"title": "Discovering Aberrant Patterns of Human Connectome in Alzheimer's Disease via Subgraph Mining",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2012/4925a086/12OmNBlofMF",
"parentPublication": {
"id": "proceedings/icdmw/2012/4925/0",
"title": "2012 IEEE 12th International Conference on Data Mining Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2012/4905/0/4905b014",
"title": "A Similarity Model and Segmentation Algorithm for White Matter Fiber Tracts",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2012/4905b014/12OmNBpVPYF",
"parentPublication": {
"id": "proceedings/icdm/2012/4905/0",
"title": "2012 IEEE 12th International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2016/3906/0/3906a061",
"title": "Aging Related White Matter Tracts Detection Based on 42 Clinically Healthy Subjects",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2016/3906a061/12OmNBziB9v",
"parentPublication": {
"id": "proceedings/itme/2016/3906/0",
"title": "2016 8th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660007",
"title": "Visualization of White Matter Tracts with Wrapped Streamlines",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660007/12OmNCmpcT8",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532777",
"title": "Visualization of white matter tracts with wrapped streamlines",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532777/12OmNqBKTNu",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmbia/2012/0354/0/06164744",
"title": "A fiber tracking method guided by volumetric tract segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/mmbia/2012/06164744/12OmNx7XH6C",
"parentPublication": {
"id": "proceedings/mmbia/2012/0354/0",
"title": "2012 IEEE Workshop on Mathematical Methods in Biomedical Image Analysis (MMBIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2018/6060/0/606001a345",
"title": "Classification of Alzheimer Disease on Imaging Modalities with Deep CNNs Using Cross-Modal Transfer Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2018/606001a345/12OmNxeutfh",
"parentPublication": {
"id": "proceedings/cbms/2018/6060/0",
"title": "2018 IEEE 31st International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004a694",
"title": "Using Fourier Descriptor Features in the Classification of White Matter Fiber Tracts in DTI",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a694/12OmNym2c2m",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07042344",
"title": "Exploration of the Brain’s White Matter Structure through Visual Abstraction and Multi-Scale Local Fiber Tract Contraction",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07042344/13rRUxcbnCt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2020/1485/0/09105974",
"title": "Automated grey and white matter segmentation in digitized Aβ human brain tissue slide images",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2020/09105974/1kwqN1w32wM",
"parentPublication": {
"id": "proceedings/icmew/2020/1485/0",
"title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxWuirp",
"title": "Proceedings of 1st IEEE Workshop on Variational and Level Set Methods in Computer Vision",
"acronym": "vlsm",
"groupId": "1002233",
"volume": "0",
"displayVolume": "0",
"year": "2001",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqOffvN",
"doi": "10.1109/VLSM.2001.938885",
"title": "Fiber Tract Mapping from Diffusion Tensor MRI",
"normalizedTitle": "Fiber Tract Mapping from Diffusion Tensor MRI",
"abstract": "To understand evolving pathology in the central nervous system (CNS) and develop effective treatments, it is essential to correlate the nerve fiber connectivity with the visualization of function. Diffusion tensor imaging (DTI) can provide the fundamental information required for viewing structural connectivity. In this paper, we present a novel algorithm for automatic fiber tract mapping in the CNS specifically, the spinal cord. The automatic fiber tract mapping problem will be solved in two phases, namely a data smoothing phase and a fiber tract mapping phase. In the former, smoothing is achieved via a new weighted TV-norm minimization (for vector-valued data) which strives to smooth while retaining all relevant detail. For the fiber tract mapping, a smooth 3D vector field indicating the dominant anisotropic direction at each spatial location is computed from the smoothed data. Fiber tracts are then determined as the smooth integral curves of this vector field in a variational framework.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To understand evolving pathology in the central nervous system (CNS) and develop effective treatments, it is essential to correlate the nerve fiber connectivity with the visualization of function. Diffusion tensor imaging (DTI) can provide the fundamental information required for viewing structural connectivity. In this paper, we present a novel algorithm for automatic fiber tract mapping in the CNS specifically, the spinal cord. The automatic fiber tract mapping problem will be solved in two phases, namely a data smoothing phase and a fiber tract mapping phase. In the former, smoothing is achieved via a new weighted TV-norm minimization (for vector-valued data) which strives to smooth while retaining all relevant detail. For the fiber tract mapping, a smooth 3D vector field indicating the dominant anisotropic direction at each spatial location is computed from the smoothed data. Fiber tracts are then determined as the smooth integral curves of this vector field in a variational framework.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To understand evolving pathology in the central nervous system (CNS) and develop effective treatments, it is essential to correlate the nerve fiber connectivity with the visualization of function. Diffusion tensor imaging (DTI) can provide the fundamental information required for viewing structural connectivity. In this paper, we present a novel algorithm for automatic fiber tract mapping in the CNS specifically, the spinal cord. The automatic fiber tract mapping problem will be solved in two phases, namely a data smoothing phase and a fiber tract mapping phase. In the former, smoothing is achieved via a new weighted TV-norm minimization (for vector-valued data) which strives to smooth while retaining all relevant detail. For the fiber tract mapping, a smooth 3D vector field indicating the dominant anisotropic direction at each spatial location is computed from the smoothed data. Fiber tracts are then determined as the smooth integral curves of this vector field in a variational framework.",
"fno": "12780081",
"keywords": [],
"authors": [
{
"affiliation": "University of Florida",
"fullName": "B.C. Vemuri",
"givenName": "B.C.",
"surname": "Vemuri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida",
"fullName": "Y. Chen",
"givenName": "Y.",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida",
"fullName": "M. Rao",
"givenName": "M.",
"surname": "Rao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida",
"fullName": "T. McGraw",
"givenName": "T.",
"surname": "McGraw",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida",
"fullName": "Z. Wang",
"givenName": "Z.",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida",
"fullName": "T. Mareci",
"givenName": "T.",
"surname": "Mareci",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vlsm",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2001-07-01T00:00:00",
"pubType": "proceedings",
"pages": "81",
"year": "2001",
"issn": null,
"isbn": "0-7695-1278-X",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "12780073",
"articleId": "12OmNyxXlxx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "12780089",
"articleId": "12OmNwcCIX3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyNQSGO",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzWx08J",
"doi": "10.1109/CVPR.2007.383096",
"title": "Fiber Tract Clustering on Manifolds With Dual Rooted-Graphs",
"normalizedTitle": "Fiber Tract Clustering on Manifolds With Dual Rooted-Graphs",
"abstract": "We propose a manifold learning approach to fiber tract clustering using a novel similarity measure between fiber tracts constructed from dual-rooted graphs. In particular, to generate this similarity measure, the chamfer or Hausdorff distance is initially employed as a local distance metric to construct minimum spanning trees between pairwise fiber tracts. These minimum spanning trees are effective in capturing the intrinsic geometry of the fiber tracts. Hence, they are used to capture the neighborhood structures of the fiber tract data set. We next assume the high-dimensional input fiber tracts to lie on low-dimensional non-linear manifolds. We apply Locally Linear Embedding, a popular manifold learning technique, to define a low-dimensional embedding of the fiber tracts that preserves the neighborhood structures of the high-dimensional data structure as captured by the method of dual-rooted graphs. Clustering is then performed on this low-dimensional data structure using the k-means algorithm. We illustrate our resulting clustering technique on both synthetic data and on real fiber tract data obtained from diffusion tensor imaging.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a manifold learning approach to fiber tract clustering using a novel similarity measure between fiber tracts constructed from dual-rooted graphs. In particular, to generate this similarity measure, the chamfer or Hausdorff distance is initially employed as a local distance metric to construct minimum spanning trees between pairwise fiber tracts. These minimum spanning trees are effective in capturing the intrinsic geometry of the fiber tracts. Hence, they are used to capture the neighborhood structures of the fiber tract data set. We next assume the high-dimensional input fiber tracts to lie on low-dimensional non-linear manifolds. We apply Locally Linear Embedding, a popular manifold learning technique, to define a low-dimensional embedding of the fiber tracts that preserves the neighborhood structures of the high-dimensional data structure as captured by the method of dual-rooted graphs. Clustering is then performed on this low-dimensional data structure using the k-means algorithm. We illustrate our resulting clustering technique on both synthetic data and on real fiber tract data obtained from diffusion tensor imaging.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a manifold learning approach to fiber tract clustering using a novel similarity measure between fiber tracts constructed from dual-rooted graphs. In particular, to generate this similarity measure, the chamfer or Hausdorff distance is initially employed as a local distance metric to construct minimum spanning trees between pairwise fiber tracts. These minimum spanning trees are effective in capturing the intrinsic geometry of the fiber tracts. Hence, they are used to capture the neighborhood structures of the fiber tract data set. We next assume the high-dimensional input fiber tracts to lie on low-dimensional non-linear manifolds. We apply Locally Linear Embedding, a popular manifold learning technique, to define a low-dimensional embedding of the fiber tracts that preserves the neighborhood structures of the high-dimensional data structure as captured by the method of dual-rooted graphs. Clustering is then performed on this low-dimensional data structure using the k-means algorithm. We illustrate our resulting clustering technique on both synthetic data and on real fiber tract data obtained from diffusion tensor imaging.",
"fno": "04270121",
"keywords": [],
"authors": [
{
"affiliation": "Dept. of Radiology, Brigham&Women's Hospital, Harvard Medical School, Boston, MA 02115; Dept. of",
"fullName": "Andy Tsai",
"givenName": "Andy",
"surname": "Tsai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Radiology, Brigham&Women's Hospital, Harvard Medical School, Boston, MA 02115",
"fullName": "Carl-Fredrik Westin",
"givenName": "Carl-Fredrik",
"surname": "Westin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electrical Eng.&Computer Science, University of Michigan, Ann Arbor, MI 48109",
"fullName": "Alfred O. Hero",
"givenName": "Alfred O.",
"surname": "Hero",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electrical Eng.&Computer Science, Mass. Institute of Technology, Cambridge, MA 02139",
"fullName": "Alan S. Willsky",
"givenName": "Alan S.",
"surname": "Willsky",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2007",
"issn": null,
"isbn": "1-4244-1179-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04270119",
"articleId": "12OmNyQGS7B",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04270122",
"articleId": "12OmNCxbXDG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2014/5179/0/07135996",
"title": "Variations of white matter tract length in regions of emotional regulation as assessed in alzheimer diffusion tensor images",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2014/07135996/12OmNAndiyd",
"parentPublication": {
"id": "proceedings/iciev/2014/5179/0",
"title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2008/2339/0/04562998",
"title": "Classification trees for fast segmentation of DTI brain fiber tracts",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2008/04562998/12OmNqBKU6s",
"parentPublication": {
"id": "proceedings/cvprw/2008/2339/0",
"title": "2008 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlsm/2001/1278/0/12780081",
"title": "Fiber Tract Mapping from Diffusion Tensor MRI",
"doi": null,
"abstractUrl": "/proceedings-article/vlsm/2001/12780081/12OmNqOffvN",
"parentPublication": {
"id": "proceedings/vlsm/2001/1278/0",
"title": "Proceedings of 1st IEEE Workshop on Variational and Level Set Methods in Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2009/3872/0/3872b341",
"title": "Assessment of Asymmetry in Pyramidal Tract by Using Fiber Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2009/3872b341/12OmNviHKc8",
"parentPublication": {
"id": "proceedings/isda/2009/3872/0",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmbia/2012/0354/0/06164744",
"title": "A fiber tracking method guided by volumetric tract segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/mmbia/2012/06164744/12OmNx7XH6C",
"parentPublication": {
"id": "proceedings/mmbia/2012/0354/0",
"title": "2012 IEEE Workshop on Mathematical Methods in Biomedical Image Analysis (MMBIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206500",
"title": "Classification of tensors and fiber tracts using Mercer-kernels encoding soft probabilistic spatial and diffusion information",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206500/12OmNyen1nx",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004a694",
"title": "Using Fourier Descriptor Features in the Classification of White Matter Fiber Tracts in DTI",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a694/12OmNym2c2m",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061449",
"title": "Exploring 3D DTI Fiber Tracts with Linked 2D Representations",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061449/13rRUwI5Ug0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07042344",
"title": "Exploration of the Brain’s White Matter Structure through Visual Abstraction and Multi-Scale Local Fiber Tract Contraction",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07042344/13rRUxcbnCt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2018/6217/0/247100a305",
"title": "[Regular Paper] Corticospinal Tract (CST) Reconstruction Based on Fiber Orientation Distributions (FODs) Tractography",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2018/247100a305/17D45WwsQ7R",
"parentPublication": {
"id": "proceedings/bibe/2018/6217/0",
"title": "2018 IEEE 18th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvTjZWK",
"title": "1993 The Twenty-sixth Hawaii International Conference on System Sciences",
"acronym": "hicss",
"groupId": "1000730",
"volume": "2",
"displayVolume": "2",
"year": "1993",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBoNrqS",
"doi": "10.1109/HICSS.1993.284106",
"title": "Finding optimum wavefront of parallel computation",
"normalizedTitle": "Finding optimum wavefront of parallel computation",
"abstract": "The problem of finding the optimum wavefront that minimizes total computation time is discussed for iterative computations over two-dimensional arrays executed on linear or two-dimensional processor arrays. Assuming a continuum of data elements in two-dimensional arrays computed on a linear array of processors, efficient algorithms are presented to determine the optimum wavefront of the computation and the optimum partitioning of the wavefront into sections assigned to individual processors. An O(n/sup 2/) time algorithm, where n is the number of data dependence vectors, for finding the optimum wavefront for one-dimensional processor arrays is described. In addition, a method for finding a good wavefront for two-dimensional single-instruction, multiple-data (SIMD) machines is presented.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The problem of finding the optimum wavefront that minimizes total computation time is discussed for iterative computations over two-dimensional arrays executed on linear or two-dimensional processor arrays. Assuming a continuum of data elements in two-dimensional arrays computed on a linear array of processors, efficient algorithms are presented to determine the optimum wavefront of the computation and the optimum partitioning of the wavefront into sections assigned to individual processors. An O(n/sup 2/) time algorithm, where n is the number of data dependence vectors, for finding the optimum wavefront for one-dimensional processor arrays is described. In addition, a method for finding a good wavefront for two-dimensional single-instruction, multiple-data (SIMD) machines is presented.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The problem of finding the optimum wavefront that minimizes total computation time is discussed for iterative computations over two-dimensional arrays executed on linear or two-dimensional processor arrays. Assuming a continuum of data elements in two-dimensional arrays computed on a linear array of processors, efficient algorithms are presented to determine the optimum wavefront of the computation and the optimum partitioning of the wavefront into sections assigned to individual processors. An O(n/sup 2/) time algorithm, where n is the number of data dependence vectors, for finding the optimum wavefront for one-dimensional processor arrays is described. In addition, a method for finding a good wavefront for two-dimensional single-instruction, multiple-data (SIMD) machines is presented.",
"fno": "00284106",
"keywords": [
"Parallel Algorithms",
"SIMD Machines",
"Parallel Computation",
"Optimum Wavefront",
"Total Computation Time",
"Iterative Computations",
"Two Dimensional Arrays",
"Processor Arrays",
"Optimum Partitioning",
"Data Dependence Vectors",
"Concurrent Computing",
"Iterative Algorithms",
"Parallel Processing",
"Partitioning Algorithms",
"Data Flow Computing",
"Distributed Computing",
"Data Structures",
"Data Engineering",
"Computer Science",
"Linear Programming"
],
"authors": [
{
"affiliation": "IBM Corp., Poughkeepsie, NY, USA",
"fullName": "B. Sinharoy",
"givenName": "B.",
"surname": "Sinharoy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "B. Szymanski",
"givenName": "B.",
"surname": "Szymanski",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "1993-01-01T00:00:00",
"pubType": "proceedings",
"pages": "225,226,227,228,229,230,231,232,233,234",
"year": "1993",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00284105",
"articleId": "12OmNCzsKGj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00284107",
"articleId": "12OmNCbU2SW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpads/2001/1153/0/11530003",
"title": "Efficient Implementation of Edmonds' Algorithm for Finding Optimum Branchings on Associative Parallel Processors",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2001/11530003/12OmNALlckb",
"parentPublication": {
"id": "proceedings/icpads/2001/1153/0",
"title": "Proceedings. Eighth International Conference on Parallel and Distributed Systems. ICPADS 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asap/1996/7542/0/75420274",
"title": "A Synthesis System For Bus-Based Wavefront Array Architectures",
"doi": null,
"abstractUrl": "/proceedings-article/asap/1996/75420274/12OmNAObbzg",
"parentPublication": {
"id": "proceedings/asap/1996/7542/0",
"title": "Proceedings of International Conference on Application Specific Systems, Architectures and Processors: ASAP '96",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2002/1573/0/01016487",
"title": "Generating parallel programs from the wavefront design pattern",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2002/01016487/12OmNBZHiie",
"parentPublication": {
"id": "proceedings/ipdps/2002/1573/1",
"title": "Parallel and Distributed Processing Symposium, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cssim/2009/3795/0/3795a120",
"title": "A Simple Optimum-Time Firing Squad Synchronization Algorithm for Two-Dimensional Rectangle Arrays",
"doi": null,
"abstractUrl": "/proceedings-article/cssim/2009/3795a120/12OmNqNXEqX",
"parentPublication": {
"id": "proceedings/cssim/2009/3795/0",
"title": "Computational Intelligence, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/arrays/1988/8860/0/00018078",
"title": "SYSTARS: A CAD tool for the synthesis and analysis of VLSI systolic/wavefront arrays",
"doi": null,
"abstractUrl": "/proceedings-article/arrays/1988/00018078/12OmNvjyxV4",
"parentPublication": {
"id": "proceedings/arrays/1988/8860/0",
"title": "Proceedings. International Conference on Systolic Arrays",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asap/1990/9089/0/00145445",
"title": "Mapping high-dimension wavefront computations to silicon",
"doi": null,
"abstractUrl": "/proceedings-article/asap/1990/00145445/12OmNxRF78Y",
"parentPublication": {
"id": "proceedings/asap/1990/9089/0",
"title": "Proceedings of the International Conference on Application Specific Array Processors",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hipc/2011/1951/0/06152717",
"title": "High-level template for the task-based parallel wavefront pattern",
"doi": null,
"abstractUrl": "/proceedings-article/hipc/2011/06152717/12OmNzR8Cwi",
"parentPublication": {
"id": "proceedings/hipc/2011/1951/0",
"title": "2011 18th International Conference on High Performance Computing (HiPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012064",
"title": "Residue arithmetic VLSI array architecture for manipulator pseudo-inverse Jacobian computation",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012064/12OmNzRqdGG",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1986/01/01676658",
"title": "Optimal Bounds for Finding Maximum on Array of Processors with k Global Buses",
"doi": null,
"abstractUrl": "/journal/tc/1986/01/01676658/13rRUILtJpT",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1982/11/01675922",
"title": "Wavefront Array Processor: Language, Architecture, and Applications",
"doi": null,
"abstractUrl": "/journal/tc/1982/11/01675922/13rRUzphDwO",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx8wTfL",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqIzh4A",
"doi": "10.1109/ICPR.2008.4761241",
"title": "Geodesic K-means clustering",
"normalizedTitle": "Geodesic K-means clustering",
"abstract": "We introduce a class of geodesic distances and extend the K-means clustering algorithm to employ this distance metric. Empirically, we demonstrate that our geodesic K-means algorithm exhibits several desirable characteristics missing in the classical K-means. These include adjusting to varying densities of clusters, high levels of resistance to outliers, and handling clusters that are not linearly separable. Furthermore our comparative experiments show that geodesic K-means comes very close to competing with state-of-the-art algorithms such as spectral and hierarchical clustering.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce a class of geodesic distances and extend the K-means clustering algorithm to employ this distance metric. Empirically, we demonstrate that our geodesic K-means algorithm exhibits several desirable characteristics missing in the classical K-means. These include adjusting to varying densities of clusters, high levels of resistance to outliers, and handling clusters that are not linearly separable. Furthermore our comparative experiments show that geodesic K-means comes very close to competing with state-of-the-art algorithms such as spectral and hierarchical clustering.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce a class of geodesic distances and extend the K-means clustering algorithm to employ this distance metric. Empirically, we demonstrate that our geodesic K-means algorithm exhibits several desirable characteristics missing in the classical K-means. These include adjusting to varying densities of clusters, high levels of resistance to outliers, and handling clusters that are not linearly separable. Furthermore our comparative experiments show that geodesic K-means comes very close to competing with state-of-the-art algorithms such as spectral and hierarchical clustering.",
"fno": "04761241",
"keywords": [
"Graph Theory",
"Pattern Clustering",
"Geodesic K Means Clustering Algorithm",
"Geodesic Distance Metric",
"Graph Theory",
"Clustering Algorithms",
"Level Measurement",
"Robustness",
"Symmetric Matrices",
"Euclidean Distance",
"Computational Complexity",
"Clustering Methods",
"Extraterrestrial Measurements",
"Geophysics Computing",
"Q Measurement"
],
"authors": [
{
"affiliation": "Department of Electrical Engineering, Stanford University, USA",
"fullName": "Nima Asgharbeygi",
"givenName": "Nima",
"surname": "Asgharbeygi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Electrical Engineering, Stanford University, USA",
"fullName": "Arian Maleki",
"givenName": "Arian",
"surname": "Maleki",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1051-4651",
"isbn": "978-1-4244-2174-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04761240",
"articleId": "12OmNvnwVn2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04761242",
"articleId": "12OmNzFv4ds",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2014/5118/0/5118a987",
"title": "Transitive Distance Clustering with K-Means Duality",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118a987/12OmNAlNiD3",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bcgin/2011/4464/0/4464a216",
"title": "Hybrid Bisect K-Means Clustering Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/bcgin/2011/4464a216/12OmNApLGNb",
"parentPublication": {
"id": "proceedings/bcgin/2011/4464/0",
"title": "2011 International Conference on Business Computing and Global Informatization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bracis/2015/0016/0/0016a074",
"title": "Scalable Fast Evolutionary k-Means Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/bracis/2015/0016a074/12OmNBgQFQz",
"parentPublication": {
"id": "proceedings/bracis/2015/0016/0",
"title": "2015 Brazilian Conference on Intelligent Systems (BRACIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2013/1050/0/06642535",
"title": "MFWK-Means: Minkowski metric Fuzzy Weighted K-Means for high dimensional data clustering",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2013/06642535/12OmNC3o4Yr",
"parentPublication": {
"id": "proceedings/iri/2013/1050/0",
"title": "2013 IEEE 14th International Conference on Information Reuse & Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csbw/2005/2442/0/24420105",
"title": "Novel Hybrid Hierarchical-K-means Clustering Method (H-K-means) for Microarray Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/csbw/2005/24420105/12OmNC8dgiY",
"parentPublication": {
"id": "proceedings/csbw/2005/2442/0",
"title": "2005 IEEE Computational Systems Bioinformatics Conference Workshops and Poster Abstracts",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iitsi/2010/4020/0/4020a063",
"title": "Research on k-means Clustering Algorithm: An Improved k-means Clustering Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iitsi/2010/4020a063/12OmNrMZpFB",
"parentPublication": {
"id": "proceedings/iitsi/2010/4020/0",
"title": "Intelligent Information Technology and Security Informatics, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cicn/2011/4587/0/4587a297",
"title": "Document Clustering Using K-Means, Heuristic K-Means and Fuzzy C-Means",
"doi": null,
"abstractUrl": "/proceedings-article/cicn/2011/4587a297/12OmNrNh0xG",
"parentPublication": {
"id": "proceedings/cicn/2011/4587/0",
"title": "Computational Intelligence and Communication Networks, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2013/1309/0/06732734",
"title": "Determining an optimal value of K in K-means clustering",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2013/06732734/12OmNx6PisW",
"parentPublication": {
"id": "proceedings/bibm/2013/1309/0",
"title": "2013 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdcloud-socialcom-sustaincom/2016/3936/0/3936a242",
"title": "K*-Means: An Effective and Efficient K-Means Clustering Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/bdcloud-socialcom-sustaincom/2016/3936a242/12OmNxwENGN",
"parentPublication": {
"id": "proceedings/bdcloud-socialcom-sustaincom/2016/3936/0",
"title": "2016 IEEE International Conferences on Big Data and Cloud Computing (BDCloud), Social Computing and Networking (SocialCom), Sustainable Computing and Communications (SustainCom) (BDCloud-SocialCom-SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icctec/2017/5784/0/578400a447",
"title": "Hybrid Dissimilarity Measurement for Intelligent Weight K-means Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icctec/2017/578400a447/1cks8g0GZTa",
"parentPublication": {
"id": "proceedings/icctec/2017/5784/0",
"title": "2017 International Conference on Computer Technology, Electronics and Communication (ICCTEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAsTgXc",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBqMDEE",
"doi": "10.1109/ICCVW.2011.6130497",
"title": "Stereo estimation of depth along virtual cut planes",
"normalizedTitle": "Stereo estimation of depth along virtual cut planes",
"abstract": "Stereo vision is broadly employed in robotics and intelligent vehicles for recovering the 3D structure of the environment. The scene depth is typically estimated by triangulation after associating pixels between views using a dense stereo matching approach. In the last few years, the image resolution has steadily increased due to the advances in camera technology. Unfortunately, achieving real-time stereo using large size images is difficult because of the computational cost of dense matching. An obvious solution is to re-sample the acquired input images, but this implies decreasing the accuracy of depth estimates. We propose an alternative that consists in performing the stereo reconstruction of the contour C where a pre-defined virtual cut plane intersects the scene. This approach enables a trade-off between runtime and 3D model resolution that does not interfere with depth accuracy. The profile cuts C are independently recovered using the SymStereo framework that has been recently introduced in [1]. It is proved through comparative experiments that SymStereo is particularly well suited for recovering depth along virtual cut planes, outperforming state-of-the-art stereo cost functions both in terms of accuracy and runtime.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Stereo vision is broadly employed in robotics and intelligent vehicles for recovering the 3D structure of the environment. The scene depth is typically estimated by triangulation after associating pixels between views using a dense stereo matching approach. In the last few years, the image resolution has steadily increased due to the advances in camera technology. Unfortunately, achieving real-time stereo using large size images is difficult because of the computational cost of dense matching. An obvious solution is to re-sample the acquired input images, but this implies decreasing the accuracy of depth estimates. We propose an alternative that consists in performing the stereo reconstruction of the contour C where a pre-defined virtual cut plane intersects the scene. This approach enables a trade-off between runtime and 3D model resolution that does not interfere with depth accuracy. The profile cuts C are independently recovered using the SymStereo framework that has been recently introduced in [1]. It is proved through comparative experiments that SymStereo is particularly well suited for recovering depth along virtual cut planes, outperforming state-of-the-art stereo cost functions both in terms of accuracy and runtime.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Stereo vision is broadly employed in robotics and intelligent vehicles for recovering the 3D structure of the environment. The scene depth is typically estimated by triangulation after associating pixels between views using a dense stereo matching approach. In the last few years, the image resolution has steadily increased due to the advances in camera technology. Unfortunately, achieving real-time stereo using large size images is difficult because of the computational cost of dense matching. An obvious solution is to re-sample the acquired input images, but this implies decreasing the accuracy of depth estimates. We propose an alternative that consists in performing the stereo reconstruction of the contour C where a pre-defined virtual cut plane intersects the scene. This approach enables a trade-off between runtime and 3D model resolution that does not interfere with depth accuracy. The profile cuts C are independently recovered using the SymStereo framework that has been recently introduced in [1]. It is proved through comparative experiments that SymStereo is particularly well suited for recovering depth along virtual cut planes, outperforming state-of-the-art stereo cost functions both in terms of accuracy and runtime.",
"fno": "06130497",
"keywords": [
"Image Matching",
"Image Reconstruction",
"Image Resolution",
"Image Sampling",
"Stereo Image Processing",
"Stereo Estimation",
"Virtual Cut Planes",
"Stereo Vision",
"Robotics",
"Intelligent Vehicles",
"3 D Structure Recovery",
"Scene Depth Estimation",
"Triangulation",
"Dense Stereo Matching Approach",
"Image Resolution",
"Image Resampling",
"Stereo Reconstruction",
"3 D Model Resolution",
"Sym Stereo Framework",
"Three Dimensional Displays",
"Cameras",
"Image Reconstruction",
"Convolution",
"Stereo Vision",
"Solid Modeling",
"Image Resolution"
],
"authors": [
{
"affiliation": "Institute of Systems and Robotics, Faculty of Sciences and Technology, University of Coimbra, Portugal",
"fullName": "Michel Antunes",
"givenName": "Michel",
"surname": "Antunes",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Systems and Robotics, Faculty of Sciences and Technology, University of Coimbra, Portugal",
"fullName": "João P. Barreto",
"givenName": "João P.",
"surname": "Barreto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-11-01T00:00:00",
"pubType": "proceedings",
"pages": "2026-2033",
"year": "2011",
"issn": null,
"isbn": "978-1-4673-0063-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06130496",
"articleId": "12OmNyNQSJa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06130498",
"articleId": "12OmNwtEEGS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/06977434",
"title": "Depth Super-resolution by Fusing Depth Imaging and Stereo Vision with Structural Determinant Information Inference",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977434/12OmNAXPymB",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457429",
"title": "Environment modelling using spherical stereo imaging",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457429/12OmNBuL14N",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890185",
"title": "A Stereo-Vision-Assisted model for depth map super-resolution",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890185/12OmNs59JHy",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130395",
"title": "Interactive object segmentation for mono and stereo applications: Geodesic prior induced graph cut energy minimization",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130395/12OmNwF0BOb",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460252",
"title": "Depth-map merging for Multi-View Stereo with high resolution images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460252/12OmNwNwzMv",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a022",
"title": "Saliency Cut in Stereo Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a022/12OmNxwWoGA",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459406",
"title": "Multiperspective stereo matching and volumetric reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459406/12OmNy2JsZs",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imccc/2011/4519/0/4519a303",
"title": "Template Based Stereo Matching Using Graph-cut",
"doi": null,
"abstractUrl": "/proceedings-article/imccc/2011/4519a303/12OmNzSQdrW",
"parentPublication": {
"id": "proceedings/imccc/2011/4519/0",
"title": "Instrumentation, Measurement, Computer, Communication and Control, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761015",
"title": "Usage of needle maps and shadows to overcome depth edges in depth map reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761015/12OmNzkuKKM",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNynsbxl",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "1",
"displayVolume": "1",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyen1my",
"doi": "10.1109/3DV.2014.55",
"title": "High-Quality Depth Recovery via Interactive Multi-view Stereo",
"normalizedTitle": "High-Quality Depth Recovery via Interactive Multi-view Stereo",
"abstract": "Although multi-view stereo has been extensively studied during the past decades, automatically computing high-quality dense depth information from captured images/videos is still quite difficult. Many factors, such as serious occlusion, large texture less regions and strong reflection, easily cause erroneous depth recovery. In this paper, we present a novel semi-automatic multi-view stereo system, which can quickly create and repair depth from a monocular sequence taken by a freely moving camera. One of our main contributions is that we propose a novel multi-view stereo model incorporating prior constraints indicated by user interaction, which makes it possible to even handle Non-Lambertian surface that surely violates the photo-consistency constraint. Users only need to provide a coarse segmentation and a few user interactions, our system can automatically correct depth and refine boundary. With other priors and occlusion handling, the erroneous depth can be effectively corrected even for very challenging examples that are difficult for state-of-the-art methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although multi-view stereo has been extensively studied during the past decades, automatically computing high-quality dense depth information from captured images/videos is still quite difficult. Many factors, such as serious occlusion, large texture less regions and strong reflection, easily cause erroneous depth recovery. In this paper, we present a novel semi-automatic multi-view stereo system, which can quickly create and repair depth from a monocular sequence taken by a freely moving camera. One of our main contributions is that we propose a novel multi-view stereo model incorporating prior constraints indicated by user interaction, which makes it possible to even handle Non-Lambertian surface that surely violates the photo-consistency constraint. Users only need to provide a coarse segmentation and a few user interactions, our system can automatically correct depth and refine boundary. With other priors and occlusion handling, the erroneous depth can be effectively corrected even for very challenging examples that are difficult for state-of-the-art methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although multi-view stereo has been extensively studied during the past decades, automatically computing high-quality dense depth information from captured images/videos is still quite difficult. Many factors, such as serious occlusion, large texture less regions and strong reflection, easily cause erroneous depth recovery. In this paper, we present a novel semi-automatic multi-view stereo system, which can quickly create and repair depth from a monocular sequence taken by a freely moving camera. One of our main contributions is that we propose a novel multi-view stereo model incorporating prior constraints indicated by user interaction, which makes it possible to even handle Non-Lambertian surface that surely violates the photo-consistency constraint. Users only need to provide a coarse segmentation and a few user interactions, our system can automatically correct depth and refine boundary. With other priors and occlusion handling, the erroneous depth can be effectively corrected even for very challenging examples that are difficult for state-of-the-art methods.",
"fno": "7000a329",
"keywords": [
"Three Dimensional Displays",
"Videos",
"Image Color Analysis",
"Optimization",
"Shape",
"Cameras",
"Solid Modeling",
"Object Segmentation",
"Depth Recovery",
"Interactive Multi View Stereo"
],
"authors": [
{
"affiliation": "State Key Lab. of CAD&CG, Zhejiang Univ., Hangzhou, China",
"fullName": "Weifeng Chen",
"givenName": null,
"surname": "Weifeng Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State Key Lab. of CAD&CG, Zhejiang Univ., Hangzhou, China",
"fullName": "Guofeng Zhang",
"givenName": null,
"surname": "Guofeng Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State Key Lab. of CAD&CG, Zhejiang Univ., Hangzhou, China",
"fullName": "Xiaojun Xiang",
"givenName": null,
"surname": "Xiaojun Xiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chinese Univ. of Hong Kong, Hong Kong, China",
"fullName": "Jiaya Jia",
"givenName": null,
"surname": "Jiaya Jia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State Key Lab. of CAD&CG, Zhejiang Univ., Hangzhou, China",
"fullName": "Hujun Bao",
"givenName": null,
"surname": "Hujun Bao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-12-01T00:00:00",
"pubType": "proceedings",
"pages": "329-336",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-7000-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7000a319",
"articleId": "12OmNAKuoRB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7000a337",
"articleId": "12OmNxZkhvA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/06977434",
"title": "Depth Super-resolution by Fusing Depth Imaging and Stereo Vision with Structural Determinant Information Inference",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977434/12OmNAXPymB",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206712",
"title": "Continuous depth estimation for multi-view stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206712/12OmNBkP3zD",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2013/2322/0/2322a169",
"title": "A Novel Depth Recovery Approach from Multi-View Stereo Based Focusing",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2013/2322a169/12OmNvA1hF7",
"parentPublication": {
"id": "proceedings/icvrv/2013/2322/0",
"title": "2013 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a614",
"title": "Conditional Regressive Random Forest Stereo-Based Hand Depth Recovery",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a614/12OmNxWcHiG",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a369",
"title": "Polarimetric Multi-view Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a369/12OmNyfdOLS",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a069",
"title": "Dense and Occlusion-Robust Multi-view Stereo for Unstructured Videos",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a069/12OmNzcPAyC",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000a389",
"title": "Learning Descriptor, Confidence, and Depth Estimation in Multi-view Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000a389/17D45W2Wyzm",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600i616",
"title": "Non-parametric Depth Distribution Modelling based Depth Inference for Multi-view Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600i616/1H1jrZZKhfq",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428281",
"title": "High-Resolution Multi-View Stereo with Dynamic Depth Edge Flow",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428281/1uilFvw0hR6",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b290",
"title": "MeshMVS: Multi-View Stereo Guided Mesh Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b290/1zWEoi7ehZS",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45Xtvpe1",
"doi": "10.1109/CVPR.2018.00682",
"title": "Depth-Aware Stereo Video Retargeting",
"normalizedTitle": "Depth-Aware Stereo Video Retargeting",
"abstract": "As compared with traditional video retargeting, stereo video retargeting poses new challenges because stereo video contains the depth information of salient objects and its time dynamics. In this work, we propose a depth-aware stereo video retargeting method by imposing the depth fidelity constraint. The proposed depth-aware retargeting method reconstructs the 3D scene to obtain the depth information of salient objects. We cast it as a constrained optimization problem, where the total cost function includes the shape, temporal and depth distortions of salient objects. As a result, the solution can preserve the shape, temporal and depth fidelity of salient objects simultaneously. It is demonstrated by experimental results that the depth-aware retargeting method achieves higher retargeting quality and provides better user experience.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As compared with traditional video retargeting, stereo video retargeting poses new challenges because stereo video contains the depth information of salient objects and its time dynamics. In this work, we propose a depth-aware stereo video retargeting method by imposing the depth fidelity constraint. The proposed depth-aware retargeting method reconstructs the 3D scene to obtain the depth information of salient objects. We cast it as a constrained optimization problem, where the total cost function includes the shape, temporal and depth distortions of salient objects. As a result, the solution can preserve the shape, temporal and depth fidelity of salient objects simultaneously. It is demonstrated by experimental results that the depth-aware retargeting method achieves higher retargeting quality and provides better user experience.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As compared with traditional video retargeting, stereo video retargeting poses new challenges because stereo video contains the depth information of salient objects and its time dynamics. In this work, we propose a depth-aware stereo video retargeting method by imposing the depth fidelity constraint. The proposed depth-aware retargeting method reconstructs the 3D scene to obtain the depth information of salient objects. We cast it as a constrained optimization problem, where the total cost function includes the shape, temporal and depth distortions of salient objects. As a result, the solution can preserve the shape, temporal and depth fidelity of salient objects simultaneously. It is demonstrated by experimental results that the depth-aware retargeting method achieves higher retargeting quality and provides better user experience.",
"fno": "642000g517",
"keywords": [
"Image Reconstruction",
"Object Detection",
"Optimisation",
"Stereo Image Processing",
"Video Signal Processing",
"Depth Information",
"Salient Objects",
"Depth Aware Stereo Video Retargeting Method",
"Depth Fidelity Constraint",
"Depth Distortions",
"Constrained Optimization Problem",
"3 D Scene Reconstruction",
"Cost Function",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Shape",
"Distortion",
"Trajectory",
"Cameras",
"Cost Function"
],
"authors": [
{
"affiliation": null,
"fullName": "Bing Li",
"givenName": "Bing",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chia-Wen Lin",
"givenName": "Chia-Wen",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Boxin Shi",
"givenName": "Boxin",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tiejun Huang",
"givenName": "Tiejun",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wen Gao",
"givenName": "Wen",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "C.-C. Jay Kuo",
"givenName": "C.-C. Jay",
"surname": "Kuo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "6517-6525",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000g508",
"articleId": "17D45XDIXTE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000g526",
"articleId": "17D45WYQJ7L",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cis/2015/8660/0/8660a179",
"title": "Image Retargeting Based on a New Salient Region Detection Method",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2015/8660a179/12OmNBtl1rY",
"parentPublication": {
"id": "proceedings/cis/2015/8660/0",
"title": "2015 11th International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2016/7258/0/07552873",
"title": "Learning-based quality assessment of retargeted stereoscopic images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2016/07552873/12OmNrFkeQd",
"parentPublication": {
"id": "proceedings/icme/2016/7258/0",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a571",
"title": "On Preserving Structure in Stereo Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a571/12OmNxGj9Uh",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177529",
"title": "Pixel fusion based stereo image retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177529/12OmNzdoMnC",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101677",
"title": "Content-Aware Video Retargeting Using Object-Preserving Warping",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101677/13rRUxASuMC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a061",
"title": "Multi-scale CNN Stereo and Pattern Removal Technique for Underwater Active Stereo System",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a061/17D45XoXP3H",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b655",
"title": "Warping-Based Stereoscopic 3D Video Retargeting With Depth Remapping",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b655/18j8LvV2AJG",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a342",
"title": "Objective Quality Assessment Method for Stereoscopic Image Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a342/1cJ0BFEtKeI",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a348",
"title": "Web Stereo Video Supervision for Depth Prediction from Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a348/1ezRCGN0q7m",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b597",
"title": "Bi3D: Stereo Depth Estimation via Binary Classifications",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b597/1m3nztOpewg",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1B12DGrwoyQ",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1B12RtBfkly",
"doi": "10.1109/WACV51458.2022.00030",
"title": "SIDE: Center-based Stereo 3D Detector with Structure-aware Instance Depth Estimation",
"normalizedTitle": "SIDE: Center-based Stereo 3D Detector with Structure-aware Instance Depth Estimation",
"abstract": "3D detection plays an indispensable role in environment perception. Due to the high cost of commonly used LiDAR sensor, stereo vision based 3D detection, as an economical yet effective setting, attracts more attention recently. For these approaches based on 2D images, accurate depth information is the key to achieve 3D detection, and most existing methods resort to a preliminary stage for depth estimation. They mainly focus on the global depth and neglect the property of depth information in this specific task, namely, sparsity and locality, where exactly accurate depth is only needed for these 3D bounding boxes. Motivated by this finding, we propose a stereo-image based anchor-free 3D detection method, called structure-aware stereo 3D detector (termed as SIDE), where we explore the instance-level depth information via constructing the cost volume from RoIs of each object. Due to the information sparsity of local cost volume, we further introduce match reweighting and structure-aware attention, to make the depth information more concentrated. Experiments conducted on the KITTI dataset show that our method achieves the state-of-the-art performance compared to existing methods without depth map supervision.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D detection plays an indispensable role in environment perception. Due to the high cost of commonly used LiDAR sensor, stereo vision based 3D detection, as an economical yet effective setting, attracts more attention recently. For these approaches based on 2D images, accurate depth information is the key to achieve 3D detection, and most existing methods resort to a preliminary stage for depth estimation. They mainly focus on the global depth and neglect the property of depth information in this specific task, namely, sparsity and locality, where exactly accurate depth is only needed for these 3D bounding boxes. Motivated by this finding, we propose a stereo-image based anchor-free 3D detection method, called structure-aware stereo 3D detector (termed as SIDE), where we explore the instance-level depth information via constructing the cost volume from RoIs of each object. Due to the information sparsity of local cost volume, we further introduce match reweighting and structure-aware attention, to make the depth information more concentrated. Experiments conducted on the KITTI dataset show that our method achieves the state-of-the-art performance compared to existing methods without depth map supervision.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D detection plays an indispensable role in environment perception. Due to the high cost of commonly used LiDAR sensor, stereo vision based 3D detection, as an economical yet effective setting, attracts more attention recently. For these approaches based on 2D images, accurate depth information is the key to achieve 3D detection, and most existing methods resort to a preliminary stage for depth estimation. They mainly focus on the global depth and neglect the property of depth information in this specific task, namely, sparsity and locality, where exactly accurate depth is only needed for these 3D bounding boxes. Motivated by this finding, we propose a stereo-image based anchor-free 3D detection method, called structure-aware stereo 3D detector (termed as SIDE), where we explore the instance-level depth information via constructing the cost volume from RoIs of each object. Due to the information sparsity of local cost volume, we further introduce match reweighting and structure-aware attention, to make the depth information more concentrated. Experiments conducted on the KITTI dataset show that our method achieves the state-of-the-art performance compared to existing methods without depth map supervision.",
"fno": "091500a225",
"keywords": [
"Object Detection",
"Optical Radar",
"Stereo Image Processing",
"Neglect",
"Exactly Accurate Depth",
"3 D Bounding Boxes",
"Anchor Free 3 D Detection Method",
"Called Structure Aware Stereo 3 D Detector",
"Instance Level Depth Information",
"Information Sparsity",
"Local Cost Volume",
"Structure Aware Attention",
"Depth Map Supervision",
"Center Based Stereo 3 D Detector",
"Structure Aware Instance Depth Estimation",
"Environment Perception",
"Commonly Used Li DAR Sensor",
"Stereo Vision",
"Economical Yet Effective Setting",
"Accurate Depth Information",
"Existing Methods Resort",
"Preliminary Stage",
"Global Depth",
"Computer Vision",
"Three Dimensional Displays",
"Costs",
"Laser Radar",
"Computational Modeling",
"Estimation",
"Detectors",
"3 D Computer Vision Object Detection Recognition Categorization",
"Scene Understanding"
],
"authors": [
{
"affiliation": "ShanghaiTech University",
"fullName": "Xidong Peng",
"givenName": "Xidong",
"surname": "Peng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Chinese University of Hong Kong",
"fullName": "Xinge Zhu",
"givenName": "Xinge",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Chinese University of Hong Kong",
"fullName": "Tai Wang",
"givenName": "Tai",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ShanghaiTech University",
"fullName": "Yuexin Ma",
"givenName": "Yuexin",
"surname": "Ma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-01-01T00:00:00",
"pubType": "proceedings",
"pages": "225-234",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-0915-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1B12RqaDLVe",
"name": "pwacv202209150-09706939s1-mm_091500a225.zip",
"size": "1.29 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pwacv202209150-09706939s1-mm_091500a225.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "091500a215",
"articleId": "1B13fkQ4USI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "091500a235",
"articleId": "1B13jeDLlG8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tp/2018/05/07932113",
"title": "3D Object Proposals Using Stereo Imagery for Accurate Object Class Detection",
"doi": null,
"abstractUrl": "/journal/tp/2018/05/07932113/13rRUxCitzS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200d133",
"title": "LIGA-Stereo: Learning LiDAR Geometry Aware Representations for Stereo-based 3D Detector",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d133/1BmFAZXbK0g",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600a877",
"title": "Pseudo-Stereo for Monocular 3D Object Detection in Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600a877/1H0Llp57Z9C",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600d783",
"title": "MonoGround: Detecting Monocular 3D Objects from the Ground",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600d783/1H1niPwcrug",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a663",
"title": "Dense Voxel Fusion for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a663/1KxUCsBZVny",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300g332",
"title": "Noise-Aware Unsupervised Deep Lidar-Stereo Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300g332/1gys4WToTE4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800k0545",
"title": "Disp R-CNN: Stereo 3D Object Detection via Shape Prior Guided Instance Disparity Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800k0545/1m3nkDPTta8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800m2533",
"title": "DSGN: Deep Stereo Geometry Network for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800m2533/1m3ntst40ta",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800n3012",
"title": "IDA-3D: Instance-Depth-Aware 3D Object Detection From Stereo Vision for Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800n3012/1m3o8mse49O",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h542",
"title": "LiDAR R-CNN: An Efficient and Universal 3D Object Detector",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h542/1yeIgfBLnI4",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmFAZXbK0g",
"doi": "10.1109/ICCV48922.2021.00314",
"title": "LIGA-Stereo: Learning LiDAR Geometry Aware Representations for Stereo-based 3D Detector",
"normalizedTitle": "LIGA-Stereo: Learning LiDAR Geometry Aware Representations for Stereo-based 3D Detector",
"abstract": "Stereo-based 3D detection aims at detecting 3D objects from stereo images, which provides a low-cost solution for 3D perception. However, its performance is still inferior compared with LiDAR-based detection algorithms. To detect and localize accurate 3D bounding boxes, LiDAR-based detectors encode high-level representations from Li-DAR point clouds, such as accurate object boundaries and surface normal directions. In contrast, high-level features learned by stereo-based detectors are easily affected by the erroneous depth estimation due to the limitation of stereo matching. To solve the problem, we propose LIGA-Stereo (LiDAR Geometry Aware Stereo Detector) to learn stereo-based 3D detectors under the guidance of high-level geometry-aware representations of LiDAR-based detection models. In addition, we found existing voxel-based stereo detectors failed to learn semantic features effectively from indirect 3D supervisions. We attach an auxiliary 2D detection head to provide direct 2D semantic supervisions. Experiment results show that the above two strategies improved the geometric and semantic representation capabilities. Compared with the state-of-the-art stereo detector, our method has improved the 3D detection performance of cars, pedestrians, cyclists by 10.44%, 5.69%, 5.97% mAP respectively on the official KITTI benchmark. The gap between stereo-based and LiDAR-based 3D detectors is further narrowed. The code is available at https://xy-guo.github.io/liga/.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Stereo-based 3D detection aims at detecting 3D objects from stereo images, which provides a low-cost solution for 3D perception. However, its performance is still inferior compared with LiDAR-based detection algorithms. To detect and localize accurate 3D bounding boxes, LiDAR-based detectors encode high-level representations from Li-DAR point clouds, such as accurate object boundaries and surface normal directions. In contrast, high-level features learned by stereo-based detectors are easily affected by the erroneous depth estimation due to the limitation of stereo matching. To solve the problem, we propose LIGA-Stereo (LiDAR Geometry Aware Stereo Detector) to learn stereo-based 3D detectors under the guidance of high-level geometry-aware representations of LiDAR-based detection models. In addition, we found existing voxel-based stereo detectors failed to learn semantic features effectively from indirect 3D supervisions. We attach an auxiliary 2D detection head to provide direct 2D semantic supervisions. Experiment results show that the above two strategies improved the geometric and semantic representation capabilities. Compared with the state-of-the-art stereo detector, our method has improved the 3D detection performance of cars, pedestrians, cyclists by 10.44%, 5.69%, 5.97% mAP respectively on the official KITTI benchmark. The gap between stereo-based and LiDAR-based 3D detectors is further narrowed. The code is available at https://xy-guo.github.io/liga/.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Stereo-based 3D detection aims at detecting 3D objects from stereo images, which provides a low-cost solution for 3D perception. However, its performance is still inferior compared with LiDAR-based detection algorithms. To detect and localize accurate 3D bounding boxes, LiDAR-based detectors encode high-level representations from Li-DAR point clouds, such as accurate object boundaries and surface normal directions. In contrast, high-level features learned by stereo-based detectors are easily affected by the erroneous depth estimation due to the limitation of stereo matching. To solve the problem, we propose LIGA-Stereo (LiDAR Geometry Aware Stereo Detector) to learn stereo-based 3D detectors under the guidance of high-level geometry-aware representations of LiDAR-based detection models. In addition, we found existing voxel-based stereo detectors failed to learn semantic features effectively from indirect 3D supervisions. We attach an auxiliary 2D detection head to provide direct 2D semantic supervisions. Experiment results show that the above two strategies improved the geometric and semantic representation capabilities. Compared with the state-of-the-art stereo detector, our method has improved the 3D detection performance of cars, pedestrians, cyclists by 10.44%, 5.69%, 5.97% mAP respectively on the official KITTI benchmark. The gap between stereo-based and LiDAR-based 3D detectors is further narrowed. The code is available at https://xy-guo.github.io/liga/.",
"fno": "281200d133",
"keywords": [
"Geometry",
"Point Cloud Compression",
"Solid Modeling",
"Three Dimensional Displays",
"Laser Radar",
"Semantics",
"Detectors",
"Detection And Localization In 2 D And 3 D",
"Stereo",
"3 D From Multiview And Other Sensors"
],
"authors": [
{
"affiliation": "The Chinese University of Hong Kong,CUHK-SenseTime Joint Laboratory",
"fullName": "Xiaoyang Guo",
"givenName": "Xiaoyang",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Chinese University of Hong Kong,CUHK-SenseTime Joint Laboratory",
"fullName": "Shaoshuai Shi",
"givenName": "Shaoshuai",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Chinese University of Hong Kong,CUHK-SenseTime Joint Laboratory",
"fullName": "Xiaogang Wang",
"givenName": "Xiaogang",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Chinese University of Hong Kong,CUHK-SenseTime Joint Laboratory",
"fullName": "Hongsheng Li",
"givenName": "Hongsheng",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "3133-3143",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200d122",
"articleId": "1BmHCIAcJwI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200d144",
"articleId": "1BmHhfdx7dm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/irc/2021/3416/0/341600a115",
"title": "Single Frame Lidar and Stereo Camera Calibration Using Registration of 3D Planes",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2021/341600a115/1ANLMp9fjKo",
"parentPublication": {
"id": "proceedings/irc/2021/3416/0",
"title": "2021 Fifth IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200d743",
"title": "Multi-Echo LiDAR for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d743/1BmFBHEQzG8",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200h878",
"title": "Fooling LiDAR Perception via Adversarial Trajectory Perturbation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200h878/1BmKjS69hrq",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09858927",
"title": "A Novel Grid-Based Geometry Compression Framework for Spinning Lidar Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09858927/1G9EN6WL3KE",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600q6343",
"title": "LiDAR Snowfall Simulation for Robust 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600q6343/1H0LcEoVh6g",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2022/7260/0/726000a072",
"title": "Experimental Assessment of Feature-based Lidar Odometry and Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2022/726000a072/1Kckj73Kc4U",
"parentPublication": {
"id": "proceedings/irc/2022/7260/0",
"title": "2022 Sixth IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a663",
"title": "Dense Voxel Fusion for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a663/1KxUCsBZVny",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300g332",
"title": "Noise-Aware Unsupervised Deep Lidar-Stereo Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300g332/1gys4WToTE4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800m2533",
"title": "DSGN: Deep Stereo Geometry Network for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800m2533/1m3ntst40ta",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h542",
"title": "LiDAR R-CNN: An Efficient and Universal 3D Object Detector",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h542/1yeIgfBLnI4",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1t7mQaZpzb2",
"title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"acronym": "hpcc-dss-smartcity",
"groupId": "1002461",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1t7n66fqm5i",
"doi": "10.1109/HPCC-SmartCity-DSS50907.2020.00106",
"title": "Attention-guided Multi-view Stereo Network For Depth Estimation",
"normalizedTitle": "Attention-guided Multi-view Stereo Network For Depth Estimation",
"abstract": "The purpose of the Multi-View Stereo is to restore the target 3D geometric model from multi-perspective images. There are several problems with the existing approaches based on deep learning, such as missing the detailed information in the predicted depth map, the low surface accuracy, and the incomplete reconstructed 3D point cloud model. In order to overcome these problems, we propose the Attention-guided Multiview Stereo Network For 3D Depth Estimation(AG-MVSNet). We combine the camera geometry with the deep neural network. And we adopt the coarse-to-fine deep learning framework to restore the target 3D geometry model. High-quality detailed feature information has an important influence on multi-view 3D reconstruction, and reference images in the natural environment contain detailed feature information which is needed in the reconstruction process. Therefore, we use the detailed feature information from different scales of reference images to restore the lost details of the high-level features. The quantitative and qualitative experimental results show that the proposed algorithm is more complete than the common multi-view 3D reconstruction algorithms.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The purpose of the Multi-View Stereo is to restore the target 3D geometric model from multi-perspective images. There are several problems with the existing approaches based on deep learning, such as missing the detailed information in the predicted depth map, the low surface accuracy, and the incomplete reconstructed 3D point cloud model. In order to overcome these problems, we propose the Attention-guided Multiview Stereo Network For 3D Depth Estimation(AG-MVSNet). We combine the camera geometry with the deep neural network. And we adopt the coarse-to-fine deep learning framework to restore the target 3D geometry model. High-quality detailed feature information has an important influence on multi-view 3D reconstruction, and reference images in the natural environment contain detailed feature information which is needed in the reconstruction process. Therefore, we use the detailed feature information from different scales of reference images to restore the lost details of the high-level features. The quantitative and qualitative experimental results show that the proposed algorithm is more complete than the common multi-view 3D reconstruction algorithms.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The purpose of the Multi-View Stereo is to restore the target 3D geometric model from multi-perspective images. There are several problems with the existing approaches based on deep learning, such as missing the detailed information in the predicted depth map, the low surface accuracy, and the incomplete reconstructed 3D point cloud model. In order to overcome these problems, we propose the Attention-guided Multiview Stereo Network For 3D Depth Estimation(AG-MVSNet). We combine the camera geometry with the deep neural network. And we adopt the coarse-to-fine deep learning framework to restore the target 3D geometry model. High-quality detailed feature information has an important influence on multi-view 3D reconstruction, and reference images in the natural environment contain detailed feature information which is needed in the reconstruction process. Therefore, we use the detailed feature information from different scales of reference images to restore the lost details of the high-level features. The quantitative and qualitative experimental results show that the proposed algorithm is more complete than the common multi-view 3D reconstruction algorithms.",
"fno": "764900a808",
"keywords": [
"Cameras",
"Computerised Instrumentation",
"Deep Learning Artificial Intelligence",
"Image Reconstruction",
"Image Restoration",
"Spatial Variables Measurement",
"Stereo Image Processing",
"Camera Geometry",
"Deep Neural Network",
"Coarse To Fine Deep Learning Framework",
"Target 3 D Geometry Model",
"High Quality Detailed Feature Information",
"Multiview 3 D Reconstruction Algorithms",
"Depth Estimation",
"Target 3 D Geometric Model",
"Depth Map Prediction",
"Low Surface Accuracy",
"Reconstructed 3 D Point Cloud Model",
"Attention Guided Multiview Stereo Network",
"Multiperspective Imaging",
"AG MVS Net",
"Deep Learning",
"Geometry",
"Solid Modeling",
"Three Dimensional Displays",
"Neural Networks",
"Predictive Models",
"Prediction Algorithms",
"Depth Map",
"Point Cloud",
"3 D Reconstruction"
],
"authors": [
{
"affiliation": "Ningxia University,School of Information Engineering,Yinchuan,China,750021",
"fullName": "Penghui Sun",
"givenName": "Penghui",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ningxia University,School of Information Engineering,Yinchuan,China,750021",
"fullName": "Suping Wu",
"givenName": "Suping",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ningxia University,School of Information Engineering,Yinchuan,China,750021",
"fullName": "Kui Lin",
"givenName": "Kui",
"surname": "Lin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hpcc-dss-smartcity",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "808-815",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7649-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "764900a802",
"articleId": "1t7n6YgjQGs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "764900a816",
"articleId": "1t7n4wD7cFG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2014/7000/1/7000a139",
"title": "Detailed 3D Model Driven Single View Scene Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a139/12OmNAXxXid",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csnt/2012/4692/0/4692a161",
"title": "Depth Measurement and 3D Reconstruction of Stereo Images",
"doi": null,
"abstractUrl": "/proceedings-article/csnt/2012/4692a161/12OmNzEmFFY",
"parentPublication": {
"id": "proceedings/csnt/2012/4692/0",
"title": "Communication Systems and Network Technologies, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/insai/2021/0859/0/085900a129",
"title": "A Survey of Multi View Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/insai/2021/085900a129/1CHx04T0WM8",
"parentPublication": {
"id": "proceedings/insai/2021/0859/0",
"title": "2021 International Conference on Networking Systems of AI (INSAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icacte/2021/4244/0/424400a020",
"title": "Multi-view 3D Reconstruction with Self-attention",
"doi": null,
"abstractUrl": "/proceedings-article/icacte/2021/424400a020/1E2wLtY6BQA",
"parentPublication": {
"id": "proceedings/icacte/2021/4244/0",
"title": "2021 14th International Conference on Advanced Computer Theory and Engineering (ICACTE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600i655",
"title": "PlaneMVS: 3D Plane Reconstruction from Multi-View Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600i655/1H0NwN7QUAU",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c036",
"title": "Mesh-Guided Multi-View Stereo With Pyramid Architecture",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c036/1m3nZpSzuaQ",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428171",
"title": "Vanet: a View Attention Guided Network for 3d Reconstruction from Single and Multi-View Images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428171/1uim7OZF1AY",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a535",
"title": "StereoPIFu: Depth Aware Clothed Human Digitization via Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a535/1yeLdh5lwSA",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a700",
"title": "3DVNet: Multi-View Depth Prediction and Volumetric Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a700/1zWEh9peydi",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b290",
"title": "MeshMVS: Multi-View Stereo Guided Mesh Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b290/1zWEoi7ehZS",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNB8Cj92",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvA1hAB",
"doi": "10.1109/ICMEW.2014.6890691",
"title": "Transform coding in AVS2",
"normalizedTitle": "Transform coding in AVS2",
"abstract": "This paper describes the transform coding in the second generation of Audio-video Coding Standard (AVS2). In AVS2, 16-bit integer DCT transform scheme is adopted, with the transform block size varies from 4×4 to 32×32, and the smaller transform kernels are completely embedded in the larger ones. To keep strong decorrelation capability of the transform, a principle is proposed in this paper to design the transform kernel by jointly minimizing DCT distortion, orthogonality and normalization. Furthermore, complexity of the transform is analyzed based on the proposed hybrid butterfly architecture, in which the transform is decomposed into a butterfly structure and a low-cost matrix multiplication. Experiment result shows that 25.7% additions and 71.0% shift operations can be saved by using this architecture compared with the partial butterfly one.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper describes the transform coding in the second generation of Audio-video Coding Standard (AVS2). In AVS2, 16-bit integer DCT transform scheme is adopted, with the transform block size varies from 4×4 to 32×32, and the smaller transform kernels are completely embedded in the larger ones. To keep strong decorrelation capability of the transform, a principle is proposed in this paper to design the transform kernel by jointly minimizing DCT distortion, orthogonality and normalization. Furthermore, complexity of the transform is analyzed based on the proposed hybrid butterfly architecture, in which the transform is decomposed into a butterfly structure and a low-cost matrix multiplication. Experiment result shows that 25.7% additions and 71.0% shift operations can be saved by using this architecture compared with the partial butterfly one.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper describes the transform coding in the second generation of Audio-video Coding Standard (AVS2). In AVS2, 16-bit integer DCT transform scheme is adopted, with the transform block size varies from 4×4 to 32×32, and the smaller transform kernels are completely embedded in the larger ones. To keep strong decorrelation capability of the transform, a principle is proposed in this paper to design the transform kernel by jointly minimizing DCT distortion, orthogonality and normalization. Furthermore, complexity of the transform is analyzed based on the proposed hybrid butterfly architecture, in which the transform is decomposed into a butterfly structure and a low-cost matrix multiplication. Experiment result shows that 25.7% additions and 71.0% shift operations can be saved by using this architecture compared with the partial butterfly one.",
"fno": "06890691",
"keywords": [
"Discrete Cosine Transforms",
"Kernel",
"Transform Coding",
"Bit Rate",
"Complexity Theory",
"Laplace Equations",
"Architecture",
"AVS 2",
"DCT",
"Transform Coding"
],
"authors": [
{
"affiliation": "Institute of Information and Communication Engineering, Zhejiang University, Zhejiang Provincial Key Laboratory of Information Network Technology, China",
"fullName": "Silong Wang",
"givenName": "Silong",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Information and Communication Engineering, Zhejiang University, Zhejiang Provincial Key Laboratory of Information Network Technology, China",
"fullName": "Xingguo Zhu",
"givenName": "Xingguo",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Information and Communication Engineering, Zhejiang University, Zhejiang Provincial Key Laboratory of Information Network Technology, China",
"fullName": "Dandan Ding",
"givenName": "Dandan",
"surname": "Ding",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Information and Communication Engineering, Zhejiang University, Zhejiang Provincial Key Laboratory of Information Network Technology, China",
"fullName": "Lu Yu",
"givenName": "Lu",
"surname": "Yu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-5",
"year": "2014",
"issn": "1945-7871",
"isbn": "978-1-4799-4717-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06890690",
"articleId": "12OmNzd7bqJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06890692",
"articleId": "12OmNrJAe4H",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dcc/2016/1853/0/07786151",
"title": "Enhanced Multiple Transform for Video Coding",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786151/12OmNrAMEJ5",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/paccs/2009/3614/0/3614a011",
"title": "DCT-Based Gabor Transform for Long or Infinite Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/paccs/2009/3614a011/12OmNvDZESu",
"parentPublication": {
"id": "proceedings/paccs/2009/3614/0",
"title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sicon/1993/1445/1/00515807",
"title": "Simple orthogonal transform for image coding",
"doi": null,
"abstractUrl": "/proceedings-article/sicon/1993/00515807/12OmNx0A7H8",
"parentPublication": {
"id": "proceedings/sicon/1993/1445/1",
"title": "Proceedings of IEEE Singapore International Conference on Networks/International Conference on Information Engineering '93",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cccm/2008/3290/2/3290b099",
"title": "DCT-Based Real-Valued Discrete Gabor Transform and Its Fast Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/cccm/2008/3290b099/12OmNx5pj3j",
"parentPublication": {
"id": "proceedings/cccm/2008/3290/3",
"title": "Computing, Communication, Control and Management, ISECS International Colloquium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890695",
"title": "Intra coding of AVS2 Video Coding Standard",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890695/12OmNxuFBt2",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890699",
"title": "On a 10-bit coding profile for AVS2 standard",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890699/12OmNy4r3MU",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/paccs/2009/3614/0/3614a449",
"title": "Generalized Discrete Cosine Transform",
"doi": null,
"abstractUrl": "/proceedings-article/paccs/2009/3614a449/12OmNyUWRa0",
"parentPublication": {
"id": "proceedings/paccs/2009/3614/0",
"title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asap/1997/7958/0/79580499",
"title": "An efficient architecture for the in place fast cosine transform",
"doi": null,
"abstractUrl": "/proceedings-article/asap/1997/79580499/12OmNyqRnqJ",
"parentPublication": {
"id": "proceedings/asap/1997/7958/0",
"title": "Proceedings IEEE International Conference on Application-Specific Systems, Architectures and Processors",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2016/2179/0/2179a241",
"title": "A Fast and Lossless IDCT Design for AVS2 Codec",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2016/2179a241/12OmNzaQoin",
"parentPublication": {
"id": "proceedings/bigmm/2016/2179/0",
"title": "2016 IEEE Second International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/1998/04/00736144",
"title": "Algorithm-based low-power transform coding architectures: the multirate approach",
"doi": null,
"abstractUrl": "/journal/si/1998/04/00736144/13rRUyfKIFa",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvzJG47",
"title": "Proceedings of 1994 28th Asilomar Conference on Signals, Systems and Computers",
"acronym": "acssc",
"groupId": "1000671",
"volume": "2",
"displayVolume": "2",
"year": "1994",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxdm4Ke",
"doi": "10.1109/ACSSC.1994.471599",
"title": "Lapped multiple bases realizations for the transform coding of still images",
"normalizedTitle": "Lapped multiple bases realizations for the transform coding of still images",
"abstract": "We describe a system for still image compression which uses several lapped orthogonal transform (LOT) sets in a multiple bases realization algorithm, the recursive residual projection (RRP) algorithm. Newly developed RRP algorithms are shown to reduce the number of encoded transform coefficients 20% beyond the DCT-only compression standard, JPEG. These algorithms still suffer from the problem of block-discontinuities at the boundaries of the segmented image. We extend these algorithms to use several newly developed LOT bases. Fast hardware implementations of these algorithms are presented.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "We describe a system for still image compression which uses several lapped orthogonal transform (LOT) sets in a multiple bases realization algorithm, the recursive residual projection (RRP) algorithm. Newly developed RRP algorithms are shown to reduce the number of encoded transform coefficients 20% beyond the DCT-only compression standard, JPEG. These algorithms still suffer from the problem of block-discontinuities at the boundaries of the segmented image. We extend these algorithms to use several newly developed LOT bases. Fast hardware implementations of these algorithms are presented.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We describe a system for still image compression which uses several lapped orthogonal transform (LOT) sets in a multiple bases realization algorithm, the recursive residual projection (RRP) algorithm. Newly developed RRP algorithms are shown to reduce the number of encoded transform coefficients 20% beyond the DCT-only compression standard, JPEG. These algorithms still suffer from the problem of block-discontinuities at the boundaries of the segmented image. We extend these algorithms to use several newly developed LOT bases. Fast hardware implementations of these algorithms are presented.",
"fno": "00471599",
"keywords": [
"Transform Coding",
"Image Coding",
"Data Compression",
"Recursive Functions",
"Image Segmentation",
"Block Codes",
"Discrete Cosine Transforms",
"Lapped Multiple Bases",
"Transform Coding",
"Still Images",
"Image Compression",
"Lapped Orthogonal Transform",
"Recursive Residual Projection Algorithm",
"Encoded Transform Coefficients",
"DCT Only Compression Standard",
"JPEG",
"Block Discontinuities",
"Segmented Image",
"Fast Hardware",
"Transform Coding",
"Image Coding",
"Image Reconstruction",
"Pixel",
"Standards Development",
"Image Segmentation",
"Hardware",
"Speech",
"Block Codes",
"Tiles"
],
"authors": [
{
"affiliation": "Sch. of Electr. Eng., Oklahoma Univ., Norman, OK, USA",
"fullName": "V. DeBrunner",
"givenName": "V.",
"surname": "DeBrunner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Electr. Eng., Oklahoma Univ., Norman, OK, USA",
"fullName": "Lixiang Chen",
"givenName": null,
"surname": "Lixiang Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Electr. Eng., Oklahoma Univ., Norman, OK, USA",
"fullName": "Hongjian Li",
"givenName": null,
"surname": "Hongjian Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acssc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1994-01-01T00:00:00",
"pubType": "proceedings",
"pages": "943,944,945,946,947",
"year": "1994",
"issn": "1058-6393",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00471598",
"articleId": "12OmNBtCCCM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00471600",
"articleId": "12OmNAsTgUp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/1996/3192/1/00540278",
"title": "Mixed Malvar-wavelets for non-stationary signal representation",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1996/00540278/12OmNAkWvOa",
"parentPublication": {
"id": "proceedings/icassp/1996/3192/1",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/1/73100290",
"title": "Variable block size lapped transforms",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100290/12OmNBp52Il",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/1/73100294",
"title": "On the use of (lapped) multiple transforms in still image compression",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100294/12OmNwlZu0B",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2010/9992/0/05711816",
"title": "A pixel-domain post-processing technique to reduce the blocking artifacts in transform-coded images",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2010/05711816/12OmNxGAL0g",
"parentPublication": {
"id": "proceedings/isspit/2010/9992/0",
"title": "2010 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6265/1/00576449",
"title": "Adaptive Delaunay triangulation for attractor image coding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576449/12OmNxGja3X",
"parentPublication": {
"id": "proceedings/icpr/1994/6265/1",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/3/81833686",
"title": "Variable block size adaptive lapped transform-based image coding",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81833686/12OmNxuXcz0",
"parentPublication": {
"id": "proceedings/icip/1997/8183/3",
"title": "Proceedings of International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1995/2431/2/00480571",
"title": "The generalized lapped transform (GLT) for subband coding applications",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1995/00480571/12OmNyY4rqX",
"parentPublication": {
"id": "proceedings/icassp/1995/2431/2",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2017/6721/0/07921909",
"title": "Signal Recovery in Compressive Sensing via Multiple Sparsifying Bases",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2017/07921909/12OmNzdGnua",
"parentPublication": {
"id": "proceedings/dcc/2017/6721/0",
"title": "2017 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1994/5320/0/00287833",
"title": "Image data compression using multiple bases representation",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1994/00287833/12OmNzdoMEH",
"parentPublication": {
"id": "proceedings/ssst/1994/5320/0",
"title": "Proceedings of 26th Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1988/9999/0/00196701",
"title": "Reduction of blocking effects in image coding with a lapped orthogonal transform",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00196701/12OmNzlUKIe",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvTBBbt",
"title": "Education Technology and Computer Science, International Workshop on",
"acronym": "etcs",
"groupId": "1002740",
"volume": "3",
"displayVolume": "3",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxzuMKx",
"doi": "10.1109/ETCS.2009.697",
"title": "The Improved Method of FGS and Simulation Based on DCT and Wavelet Transform",
"normalizedTitle": "The Improved Method of FGS and Simulation Based on DCT and Wavelet Transform",
"abstract": "With the increase of streaming video application on the internet, the requirement of video compression has changed. The improved method of fine granular scalability(FGS) is given, which based on the wavelet transform and discrete cosine transform(DCT). The simulation has verified, which proves the feasibility of the method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the increase of streaming video application on the internet, the requirement of video compression has changed. The improved method of fine granular scalability(FGS) is given, which based on the wavelet transform and discrete cosine transform(DCT). The simulation has verified, which proves the feasibility of the method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the increase of streaming video application on the internet, the requirement of video compression has changed. The improved method of fine granular scalability(FGS) is given, which based on the wavelet transform and discrete cosine transform(DCT). The simulation has verified, which proves the feasibility of the method.",
"fno": "3557e739",
"keywords": [
"Fine Granular Scalability FGS Wavelet Transform Discrete Cosine Transform DCT"
],
"authors": [
{
"affiliation": null,
"fullName": "Jin Hui-long",
"givenName": "Jin",
"surname": "Hui-long",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xu Cheng-qian",
"givenName": "Xu",
"surname": "Cheng-qian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhang Jin-bo",
"givenName": "Zhang",
"surname": "Jin-bo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chen Jia-xing",
"givenName": "Chen",
"surname": "Jia-xing",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "etcs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-03-01T00:00:00",
"pubType": "proceedings",
"pages": "727-731",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3557-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3557e735",
"articleId": "12OmNAkniY0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3557e811",
"articleId": "12OmNzUxOhe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2004/8484/5/01327272",
"title": "Improved rate allocation method based on sliding window for FGS video bit-stream",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01327272/12OmNAgY7na",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/5",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ncvpripg/2011/4599/0/4599a167",
"title": "Wavelet Image Resizing in the Block DCT Space",
"doi": null,
"abstractUrl": "/proceedings-article/ncvpripg/2011/4599a167/12OmNAhxjEg",
"parentPublication": {
"id": "proceedings/ncvpripg/2011/4599/0",
"title": "Computer Vision, Pattern Recognition, Image Processing and Graphics, National Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisp/2008/3119/4/3119d494",
"title": "Local Adaptation of Leak Factor Alpha Based on Mode-Motion-Product of the Base-Layer Macroblocks in FGS Coding with Adaptive Reference",
"doi": null,
"abstractUrl": "/proceedings-article/cisp/2008/3119d494/12OmNC3FGcD",
"parentPublication": {
"id": "proceedings/cisp/2008/3119/4",
"title": "Image and Signal Processing, Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2005/9331/0/01521367",
"title": "High-performance low-complexity bit-plane coding scheme for MPEG-4 FGS",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2005/01521367/12OmNrMZpk9",
"parentPublication": {
"id": "proceedings/icme/2005/9331/0",
"title": "2005 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/paccs/2009/3614/0/3614a011",
"title": "DCT-Based Gabor Transform for Long or Infinite Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/paccs/2009/3614a011/12OmNvDZESu",
"parentPublication": {
"id": "proceedings/paccs/2009/3614/0",
"title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmc/2009/3501/1/3501a429",
"title": "An Algorithm of Adaptive Video Coding Scheme Based on FGS",
"doi": null,
"abstractUrl": "/proceedings-article/cmc/2009/3501a429/12OmNwDACpl",
"parentPublication": {
"id": "cmc/2009/3501/1",
"title": "Communications and Mobile Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cccm/2008/3290/2/3290b070",
"title": "Block Time-Recursive Algorithms for DCT-Based Real-Valued Discrete Gabor Transform",
"doi": null,
"abstractUrl": "/proceedings-article/cccm/2008/3290b070/12OmNwIHoBd",
"parentPublication": {
"id": "proceedings/cccm/2008/3290/3",
"title": "Computing, Communication, Control and Management, ISECS International Colloquium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cccm/2008/3290/2/3290b099",
"title": "DCT-Based Real-Valued Discrete Gabor Transform and Its Fast Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/cccm/2008/3290b099/12OmNx5pj3j",
"parentPublication": {
"id": "proceedings/cccm/2008/3290/3",
"title": "Computing, Communication, Control and Management, ISECS International Colloquium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2010/4297/0/4297a283",
"title": "Infrared Face Recognition Based on Blood Perfusion and Weighted Block-DCT in Wavelet Domain",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2010/4297a283/12OmNy1SFQm",
"parentPublication": {
"id": "proceedings/cis/2010/4297/0",
"title": "2010 International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csnt/2011/4437/0/4437a593",
"title": "Digital Image Watermarking by Using Discrete Wavelet Transform and Discrete Cosine Transform and Comparison Based on PSNR",
"doi": null,
"abstractUrl": "/proceedings-article/csnt/2011/4437a593/12OmNzC5T04",
"parentPublication": {
"id": "proceedings/csnt/2011/4437/0",
"title": "Communication Systems and Network Technologies, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAY79oS",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy3iFw0",
"doi": "10.1109/ICME.2014.6890222",
"title": "Restoring corrupted motion capture data via jointly low-rank matrix completion",
"normalizedTitle": "Restoring corrupted motion capture data via jointly low-rank matrix completion",
"abstract": "Motion capture (mocap) technology is widely used in various applications. The acquired mocap data usually has missing data due to occlusions or ambiguities. Therefore, restoring the missing entries of the mocap data is a fundamental issue in mocap data analysis. Based on jointly low-rank matrix completion, this paper presents a practical and highly efficient algorithm for restoring the missing mocap data. Taking advantage of the unique properties of mocap data (i.e, strong correlation among the data), we represent the corrupted data as two types of matrices, where both the local and global characteristics are taken into consideration. Then we formulate the problem as a convex optimization problem, where the missing data is recovered by solving the two matrices using the alternating direction method of multipliers algorithm. Experimental results demonstrate that the proposed scheme significantly outperforms the state-of-the-art algorithms in terms of both the quality and computational cost.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Motion capture (mocap) technology is widely used in various applications. The acquired mocap data usually has missing data due to occlusions or ambiguities. Therefore, restoring the missing entries of the mocap data is a fundamental issue in mocap data analysis. Based on jointly low-rank matrix completion, this paper presents a practical and highly efficient algorithm for restoring the missing mocap data. Taking advantage of the unique properties of mocap data (i.e, strong correlation among the data), we represent the corrupted data as two types of matrices, where both the local and global characteristics are taken into consideration. Then we formulate the problem as a convex optimization problem, where the missing data is recovered by solving the two matrices using the alternating direction method of multipliers algorithm. Experimental results demonstrate that the proposed scheme significantly outperforms the state-of-the-art algorithms in terms of both the quality and computational cost.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Motion capture (mocap) technology is widely used in various applications. The acquired mocap data usually has missing data due to occlusions or ambiguities. Therefore, restoring the missing entries of the mocap data is a fundamental issue in mocap data analysis. Based on jointly low-rank matrix completion, this paper presents a practical and highly efficient algorithm for restoring the missing mocap data. Taking advantage of the unique properties of mocap data (i.e, strong correlation among the data), we represent the corrupted data as two types of matrices, where both the local and global characteristics are taken into consideration. Then we formulate the problem as a convex optimization problem, where the missing data is recovered by solving the two matrices using the alternating direction method of multipliers algorithm. Experimental results demonstrate that the proposed scheme significantly outperforms the state-of-the-art algorithms in terms of both the quality and computational cost.",
"fno": "06890222",
"keywords": [
"Trajectory",
"Convex Functions",
"Computers",
"Image Restoration",
"Accuracy",
"Optimization",
"Computational Efficiency",
"Convex Optimization",
"Motion Capture",
"Matrix Completion",
"Low Rank"
],
"authors": [
{
"affiliation": "School of Electrical and Electronics Engineering, Nanyang Technological University, 639798 Singapore",
"fullName": "Junhui Hou",
"givenName": "Junhui",
"surname": "Hou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Electrical and Electronics Engineering, Nanyang Technological University, 639798 Singapore",
"fullName": "Zhen-Peng Bian",
"givenName": "Zhen-Peng",
"surname": "Bian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Electrical and Electronics Engineering, Nanyang Technological University, 639798 Singapore",
"fullName": "Lap-Pui Chau",
"givenName": "Lap-Pui",
"surname": "Chau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute for Media Innovation, Nanyang Technological University, 639798 Singapore",
"fullName": "Nadia Magnenat-Thalmann",
"givenName": "Nadia",
"surname": "Magnenat-Thalmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computer Engineering, Nanyang Technological University, 639798 Singapore",
"fullName": "Ying He",
"givenName": "Ying",
"surname": "He",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4761-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06890221",
"articleId": "12OmNzC5Tc4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06890223",
"articleId": "12OmNBgz4B0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2011/4362/0/4362a175",
"title": "Improved Quadratic Mapping Algorithm for Restoring Colour in Faded Photographs and Slides",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2011/4362a175/12OmNC3FGaf",
"parentPublication": {
"id": "proceedings/crv/2011/4362/0",
"title": "2011 Canadian Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2005/9313/0/01577186",
"title": "Improved Papoulis-Gerchberg algorithm for restoring lost samples",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2005/01577186/12OmNvJXeEJ",
"parentPublication": {
"id": "proceedings/isspit/2005/9313/0",
"title": "2005 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150917",
"title": "Synthesis of optimal detail-restoring stack filters for image processing",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150917/12OmNx5Yv6S",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815025",
"title": "Automatic Motion Capture Data Denoising via Filtered Local Subspace Affinity and Low Rank Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815025/12OmNxj23eV",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bcgin/2011/4464/0/4464a333",
"title": "A New Approach for Restoring Impulse Noise Corrupted Images",
"doi": null,
"abstractUrl": "/proceedings-article/bcgin/2011/4464a333/12OmNzRZpUo",
"parentPublication": {
"id": "proceedings/bcgin/2011/4464/0",
"title": "2011 International Conference on Business Computing and Global Informatization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/arith/2018/2613/0/08464807",
"title": "Combining Restoring Array and Logarithmic Dividers into an Approximate Hybrid Design",
"doi": null,
"abstractUrl": "/proceedings-article/arith/2018/08464807/13HFz374L4s",
"parentPublication": {
"id": "proceedings/arith/2018/2613/0",
"title": "2018 IEEE 25th Symposium on Computer Arithmetic (ARITH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/01/ttp2013010208",
"title": "Tensor Completion for Estimating Missing Values in Visual Data",
"doi": null,
"abstractUrl": "/journal/tp/2013/01/ttp2013010208/13rRUxC0SPK",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2017/05/07526433",
"title": "Recover Corrupted Data in Sensor Networks: A Matrix Completion Solution",
"doi": null,
"abstractUrl": "/journal/tm/2017/05/07526433/13rRUy0HYKq",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07042272",
"title": "Human Motion Capture Data Tailored Transform Coding",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07042272/13rRUytWF9m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2020/04/08902027",
"title": "Approximate Restoring Dividers Using Inexact Cells and Estimation From Partial Remainders",
"doi": null,
"abstractUrl": "/journal/tc/2020/04/08902027/1eYNbXtJquc",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxwncA6",
"title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)",
"acronym": "paccs",
"groupId": "1002867",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyUWRa0",
"doi": "10.1109/PACCS.2009.62",
"title": "Generalized Discrete Cosine Transform",
"normalizedTitle": "Generalized Discrete Cosine Transform",
"abstract": "The discrete cosine transform (DCT), introduced by Ahmed, Natarajan and Rao, has been used in many applications of digital signal processing, data compression and information hiding. There are four types of the discrete cosine transform. In simulating the discrete cosine transform, we propose a generalized discrete cosine transform with three parameters, and prove its orthogonality for some new cases. Finally, a new type of discrete cosine transform is proposed and its orthogonality is proved.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The discrete cosine transform (DCT), introduced by Ahmed, Natarajan and Rao, has been used in many applications of digital signal processing, data compression and information hiding. There are four types of the discrete cosine transform. In simulating the discrete cosine transform, we propose a generalized discrete cosine transform with three parameters, and prove its orthogonality for some new cases. Finally, a new type of discrete cosine transform is proposed and its orthogonality is proved.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The discrete cosine transform (DCT), introduced by Ahmed, Natarajan and Rao, has been used in many applications of digital signal processing, data compression and information hiding. There are four types of the discrete cosine transform. In simulating the discrete cosine transform, we propose a generalized discrete cosine transform with three parameters, and prove its orthogonality for some new cases. Finally, a new type of discrete cosine transform is proposed and its orthogonality is proved.",
"fno": "3614a449",
"keywords": [
"Discrete Fourier Transform",
"Discrete Sine Transform",
"Discrete Cosine Transform"
],
"authors": [
{
"affiliation": null,
"fullName": "Jianqin Zhou",
"givenName": "Jianqin",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ping Chen",
"givenName": "Ping",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "paccs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-05-01T00:00:00",
"pubType": "proceedings",
"pages": "449-452",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3614-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3614a446",
"articleId": "12OmNz5s0SY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3614a453",
"articleId": "12OmNrJiCUR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/etcs/2009/3557/1/3557a804",
"title": "Image Encryption Based on Random Fractional Discrete Cosine and Sine Transforms",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2009/3557a804/12OmNvT2oMH",
"parentPublication": {
"id": "proceedings/etcs/2009/3557/2",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2009/3736/2/3736b390",
"title": "Fast Algorithm for Arbitrary Length Discrete Cosine Transform",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2009/3736b390/12OmNwwMeWu",
"parentPublication": {
"id": "proceedings/icnc/2009/3736/2",
"title": "2009 Fifth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2009/3557/2/3557d001",
"title": "High-Precision and Fixed-Point Discrete Cosine Transform without Multiplications",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2009/3557d001/12OmNy2ah2a",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109c326",
"title": "Parallel Algorithm of Two-Dimensional Discrete Cosine Transform Based on Special Data Representation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109c326/12OmNyGbIdh",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/paccs/2009/3614/0/3614a446",
"title": "Generalized Discrete W Transform",
"doi": null,
"abstractUrl": "/proceedings-article/paccs/2009/3614a446/12OmNz5s0SY",
"parentPublication": {
"id": "proceedings/paccs/2009/3614/0",
"title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ias/2009/3744/2/3744b061",
"title": "Image Encryption with Discrete Fractional Cosine Transform and Chaos",
"doi": null,
"abstractUrl": "/proceedings-article/ias/2009/3744b061/12OmNzlD9eO",
"parentPublication": {
"id": "proceedings/ias/2009/3744/2",
"title": "Information Assurance and Security, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1978/10/01674977",
"title": "On Computing the Discrete Cosine Transform",
"doi": null,
"abstractUrl": "/journal/tc/1978/10/01674977/13rRUNvgz8x",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1980/04/01675570",
"title": "A Symmetric Cosine Transform",
"doi": null,
"abstractUrl": "/journal/tc/1980/04/01675570/13rRUxASuar",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1974/01/01672377",
"title": "Discrete Cosine Transfom",
"doi": null,
"abstractUrl": "/journal/tc/1974/01/01672377/13rRUxYrbKI",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1976/07/01674687",
"title": "A Storage Efficient Way to Implement the Discrete Cosine Transform",
"doi": null,
"abstractUrl": "/journal/tc/1976/07/01674687/13rRUygT7x2",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqH9hnt",
"title": "Proceedings of 27th Asilomar Conference on Signals, Systems and Computers",
"acronym": "acssc",
"groupId": "1000671",
"volume": "0",
"displayVolume": "0",
"year": "1993",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNykCccn",
"doi": "10.1109/ACSSC.1993.342322",
"title": "Three-dimensional transform coding of multispectral data",
"normalizedTitle": "Three-dimensional transform coding of multispectral data",
"abstract": "We present a three-dimensional terrain-adaptive transform-based bandwidth compression technique for multispectral imagery. The transformation involves one dimensional Karhunen-Loeve transform (KLT) followed by two-dimensional discrete cosine transform. The algorithm exploits the inherent spectral and spatial correlations in the data. The images are spectrally decorrelated via the KLT to produce the eigen images. The resulting spectrally-decorrelated eigen images are then compressed using the JPEG algorithm. The algorithm is conveniently parameterized to accommodate reconstructed image fidelities ranging from near-lossless at about 5:1 compression ratio (CR) to visually lossy beginning at around 40:1 CR. A significant practical advantage of this approach is that it is leveraged on the standard and highly developed JPEG compression technology. Because of the significant compaction of the data resulting from the initial KLT process, an 8-bit JPEG can be used for coding the eigen images associated with 8, 10, or 12 bits multispectral data. The novelty of this technique lies in its unique capability to adaptively vary the characteristics of the spectral decorrelation transformation based upon the local terrain variation.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a three-dimensional terrain-adaptive transform-based bandwidth compression technique for multispectral imagery. The transformation involves one dimensional Karhunen-Loeve transform (KLT) followed by two-dimensional discrete cosine transform. The algorithm exploits the inherent spectral and spatial correlations in the data. The images are spectrally decorrelated via the KLT to produce the eigen images. The resulting spectrally-decorrelated eigen images are then compressed using the JPEG algorithm. The algorithm is conveniently parameterized to accommodate reconstructed image fidelities ranging from near-lossless at about 5:1 compression ratio (CR) to visually lossy beginning at around 40:1 CR. A significant practical advantage of this approach is that it is leveraged on the standard and highly developed JPEG compression technology. Because of the significant compaction of the data resulting from the initial KLT process, an 8-bit JPEG can be used for coding the eigen images associated with 8, 10, or 12 bits multispectral data. The novelty of this technique lies in its unique capability to adaptively vary the characteristics of the spectral decorrelation transformation based upon the local terrain variation.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a three-dimensional terrain-adaptive transform-based bandwidth compression technique for multispectral imagery. The transformation involves one dimensional Karhunen-Loeve transform (KLT) followed by two-dimensional discrete cosine transform. The algorithm exploits the inherent spectral and spatial correlations in the data. The images are spectrally decorrelated via the KLT to produce the eigen images. The resulting spectrally-decorrelated eigen images are then compressed using the JPEG algorithm. The algorithm is conveniently parameterized to accommodate reconstructed image fidelities ranging from near-lossless at about 5:1 compression ratio (CR) to visually lossy beginning at around 40:1 CR. A significant practical advantage of this approach is that it is leveraged on the standard and highly developed JPEG compression technology. Because of the significant compaction of the data resulting from the initial KLT process, an 8-bit JPEG can be used for coding the eigen images associated with 8, 10, or 12 bits multispectral data. The novelty of this technique lies in its unique capability to adaptively vary the characteristics of the spectral decorrelation transformation based upon the local terrain variation.",
"fno": "00342322",
"keywords": [
"Image Coding",
"Bandwidth Compression",
"Discrete Cosine Transforms",
"Encoding",
"Three Dimensional Transform Coding",
"Multispectral Data",
"Terrain Adaptive Transform",
"Bandwidth Compression",
"Multispectral Imagery",
"Karhunen Loeve Transform",
"KLT",
"Two Dimensional Discrete Cosine Transform",
"DCT",
"Spatial Correlations",
"Spectral Correlations",
"Eigen Images",
"JPEG Algorithm",
"Reconstructed Image Fidelities",
"Compression Ratio",
"Spectral Decorrelation Transformation",
"Local Terrain Variation",
"10 Bit",
"12 Bit",
"8 Bit",
"Transform Coding",
"Image Coding",
"Karhunen Loeve Transforms",
"Decorrelation",
"Chromium",
"Bandwidth",
"Multispectral Imaging",
"Discrete Cosine Transforms",
"Image Reconstruction",
"Standards Development"
],
"authors": [
{
"affiliation": "Lockheed Palo Alto Res. Lab., CA, USA",
"fullName": "J.A. Saghri",
"givenName": "J.A.",
"surname": "Saghri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lockheed Palo Alto Res. Lab., CA, USA",
"fullName": "A.G. Tescher",
"givenName": "A.G.",
"surname": "Tescher",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lockheed Palo Alto Res. Lab., CA, USA",
"fullName": "J.T. Reagan",
"givenName": "J.T.",
"surname": "Reagan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acssc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1993-01-01T00:00:00",
"pubType": "proceedings",
"pages": "1342,1343,1344,1345,1346",
"year": "1993",
"issn": "1058-6393",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00342321",
"articleId": "12OmNCyBXf4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00342323",
"articleId": "12OmNyrIaDN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2004/8484/5/01327085",
"title": "Integer to integer Karhunen Loeve transform over finite fields [communication system applications]",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01327085/12OmNApcuiD",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/5",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1993/4120/0/00342320",
"title": "Effects of multispectral compression on machine exploitation",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1993/00342320/12OmNB9t6yH",
"parentPublication": {
"id": "proceedings/acssc/1993/4120/0",
"title": "Proceedings of 27th Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2006/9753/0/04042263",
"title": "Suboptimal Karhunen-Loève Transform for Compression of Astronomical Images",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2006/04042263/12OmNrGKeuK",
"parentPublication": {
"id": "proceedings/isspit/2006/9753/0",
"title": "2006 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1992/2717/0/00227461",
"title": "Multispectral KLT-wavelet data compression for Landsat thematic mapper images",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1992/00227461/12OmNwkhTid",
"parentPublication": {
"id": "proceedings/dcc/1992/2717/0",
"title": "1992 Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1997/7761/0/77610231",
"title": "Universal Transform Coding Based on Backward Adaptation",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1997/77610231/12OmNx3Zjhg",
"parentPublication": {
"id": "proceedings/dcc/1997/7761/0",
"title": "Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/1/81831612",
"title": "Embedded zerotree wavelet coding of multispectral images",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831612/12OmNxUdv9P",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2000/0595/0/05950088",
"title": "Analysis of Determining Camera Position Via Karhunen-Loeve Transform",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2000/05950088/12OmNxwENAL",
"parentPublication": {
"id": "proceedings/ssiai/2000/0595/0",
"title": "Image Analysis and Interpretation, IEEE Southwest Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1993/4120/0/00342462",
"title": "SPECTRUM analysis of multispectral imagery in conjunction with wavelet/KLT data compression",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1993/00342462/12OmNy314dX",
"parentPublication": {
"id": "proceedings/acssc/1993/4120/0",
"title": "Proceedings of 27th Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/4/01326900",
"title": "Analysis of quantization noise feedback in causal transform coding",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326900/12OmNzfXavy",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/4",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1980/04/01675570",
"title": "A Symmetric Cosine Transform",
"doi": null,
"abstractUrl": "/journal/tc/1980/04/01675570/13rRUxASuar",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrAMEOk",
"title": "Proceedings of 1st International Conference on Image Processing",
"acronym": "icip",
"groupId": "1000349",
"volume": "2",
"displayVolume": "2",
"year": "1994",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz61duH",
"doi": "10.1109/ICIP.1994.413593",
"title": "Adaptively subsampled image coding with warped polynomials",
"normalizedTitle": "Adaptively subsampled image coding with warped polynomials",
"abstract": "This paper presents an adaptive image coding method which uses an image dependent orthogonal transform. The method is a generalization of a one-dimensional coding scheme which represents signals as linear combinations of signal-dependent time-warped (TW) orthogonal polynomials. This paper briefly summarizes the theory of time-warped signal coding; next, it describes the new image compression method. Basically, this method transforms all of the image rows and columns with different, but not completely independent bases. By adapting the bases to the image, high quality coding is achieved even when retaining only a small number of transform coefficients. Also, the overhead involved in coding the bases is very small. This paper shows that at net bit rates of about 0.3 bpp, images compressed by the new method are sharper and less distorted by ringing effects than those produced by JPEG or the full-image DCT. Block distortion, which is an important problem in JPEG at 0.3 bpp cannot occur in the new method, since it transforms the full image instead of blocks.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an adaptive image coding method which uses an image dependent orthogonal transform. The method is a generalization of a one-dimensional coding scheme which represents signals as linear combinations of signal-dependent time-warped (TW) orthogonal polynomials. This paper briefly summarizes the theory of time-warped signal coding; next, it describes the new image compression method. Basically, this method transforms all of the image rows and columns with different, but not completely independent bases. By adapting the bases to the image, high quality coding is achieved even when retaining only a small number of transform coefficients. Also, the overhead involved in coding the bases is very small. This paper shows that at net bit rates of about 0.3 bpp, images compressed by the new method are sharper and less distorted by ringing effects than those produced by JPEG or the full-image DCT. Block distortion, which is an important problem in JPEG at 0.3 bpp cannot occur in the new method, since it transforms the full image instead of blocks.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an adaptive image coding method which uses an image dependent orthogonal transform. The method is a generalization of a one-dimensional coding scheme which represents signals as linear combinations of signal-dependent time-warped (TW) orthogonal polynomials. This paper briefly summarizes the theory of time-warped signal coding; next, it describes the new image compression method. Basically, this method transforms all of the image rows and columns with different, but not completely independent bases. By adapting the bases to the image, high quality coding is achieved even when retaining only a small number of transform coefficients. Also, the overhead involved in coding the bases is very small. This paper shows that at net bit rates of about 0.3 bpp, images compressed by the new method are sharper and less distorted by ringing effects than those produced by JPEG or the full-image DCT. Block distortion, which is an important problem in JPEG at 0.3 bpp cannot occur in the new method, since it transforms the full image instead of blocks.",
"fno": "00413593",
"keywords": [
"Image Coding",
"Data Compression",
"Transform Coding",
"Polynomials",
"Adaptive Signal Processing",
"Image Sampling",
"Adaptively Subsampled Image Coding",
"Time Warped Signal Coding",
"Orthogonal Transform",
"One Dimensional Coding",
"Time Warped Orthogonal Polynomials",
"Image Compression",
"High Quality Coding",
"Transform Coefficients",
"Overhead",
"Bit Rates",
"Ringing Effects",
"JPEG",
"Full Image DCT",
"Image Coding",
"Polynomials",
"Discrete Cosine Transforms",
"Bit Rate",
"Transform Coding",
"Block Codes",
"Statistics",
"Bandwidth",
"Frequency",
"Information Systems"
],
"authors": [
{
"affiliation": "Dept. of Electron. & Inf. Syst., Ghent Univ., Belgium",
"fullName": "W. Philips",
"givenName": "W.",
"surname": "Philips",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1994-01-01T00:00:00",
"pubType": "proceedings",
"pages": "366,367,368,369,370",
"year": "1994",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00413592",
"articleId": "12OmNxVlTD7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00413594",
"articleId": "12OmNqGA5nQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icnc/2013/5287/0/06504062",
"title": "On lossless and lossy compression of step size matrices in JPEG coding",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2013/06504062/12OmNBKW9Fg",
"parentPublication": {
"id": "proceedings/icnc/2013/5287/0",
"title": "2013 International Conference on Computing, Networking and Communications (ICNC 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2000/6293/2/00859105",
"title": "Evaluation of a warped linear predictive coding scheme",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2000/00859105/12OmNBSBkb9",
"parentPublication": {
"id": "proceedings/icassp/2000/6293/2",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2017/6721/0/07921907",
"title": "Error Bounds for HDR Image Coding with JPEG XT",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2017/07921907/12OmNrJROZL",
"parentPublication": {
"id": "proceedings/dcc/2017/6721/0",
"title": "2017 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/3/81833094",
"title": "Object-Scalable Mesh-Based Coding of Synthetic and Natural Image Objects",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81833094/12OmNvA1hFX",
"parentPublication": {
"id": "proceedings/icip/1997/8183/3",
"title": "Proceedings of International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1994/6405/2/00471599",
"title": "Lapped multiple bases realizations for the transform coding of still images",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1994/00471599/12OmNxdm4Ke",
"parentPublication": {
"id": "proceedings/acssc/1994/6405/1",
"title": "Proceedings of 1994 28th Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1988/0878/0/00028308",
"title": "Optimization of a monochrome picture coding scheme based on a two-component model",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1988/00028308/12OmNzIUg26",
"parentPublication": {
"id": "proceedings/icpr/1988/0878/0",
"title": "9th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/2/00413501",
"title": "A new adaptive interframe transform coding using directional classification",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413501/12OmNzdGnvd",
"parentPublication": {
"id": "proceedings/icip/1994/6952/2",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07042272",
"title": "Human Motion Capture Data Tailored Transform Coding",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07042272/13rRUytWF9m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900b780",
"title": "A Neural-network Enhanced Video Coding Framework beyond VVC",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900b780/1G56g11jeec",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2020/02/08865439",
"title": "Adaptive-Length Coding of Image Data for Low-Cost Approximate Storage",
"doi": null,
"abstractUrl": "/journal/tc/2020/02/08865439/1e2DkF06J0s",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvlPkDE",
"title": "Proceedings Visualization '94",
"acronym": "visual",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "1994",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzVXNQ0",
"doi": "10.1109/VISUAL.1994.346332",
"title": "Progressive transmission of scientific data using biorthogonal wavelet transform",
"normalizedTitle": "Progressive transmission of scientific data using biorthogonal wavelet transform",
"abstract": "An important issue in scientific visualization systems is the management of data sets. Most data sets in scientific visualization, whether created by measurement or simulation, are usually voluminous. The goal of data management is to reduce the storage space and the access time of these data sets to speed up the visualization process. A new progressive transmission scheme using spline biorthogonal wavelet bases is proposed in this paper. By exploiting the properties of this set of wavelet bases, a fast algorithm involving only additions and subtractions is developed. Due to the multiresolutional nature of the wavelet transform, this scheme is compatible with hierarchical-structured rendering algorithms. The formula for reconstructing the functional values in a continuous volume space is given in a simple polynomial form. Lossless compression is possible, even when using floating-point numbers. This algorithm has been applied to data from a global ocean model. The lossless compression ratio is about 1.5:1. With a compression ratio of 50:1, the reconstructed data is still of good quality. Several other wavelet bases are compared with the spline biorthogonal wavelet bases. Finally the reconstructed data is visualized using various algorithms and the results are demonstrated.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "An important issue in scientific visualization systems is the management of data sets. Most data sets in scientific visualization, whether created by measurement or simulation, are usually voluminous. The goal of data management is to reduce the storage space and the access time of these data sets to speed up the visualization process. A new progressive transmission scheme using spline biorthogonal wavelet bases is proposed in this paper. By exploiting the properties of this set of wavelet bases, a fast algorithm involving only additions and subtractions is developed. Due to the multiresolutional nature of the wavelet transform, this scheme is compatible with hierarchical-structured rendering algorithms. The formula for reconstructing the functional values in a continuous volume space is given in a simple polynomial form. Lossless compression is possible, even when using floating-point numbers. This algorithm has been applied to data from a global ocean model. The lossless compression ratio is about 1.5:1. With a compression ratio of 50:1, the reconstructed data is still of good quality. Several other wavelet bases are compared with the spline biorthogonal wavelet bases. Finally the reconstructed data is visualized using various algorithms and the results are demonstrated.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An important issue in scientific visualization systems is the management of data sets. Most data sets in scientific visualization, whether created by measurement or simulation, are usually voluminous. The goal of data management is to reduce the storage space and the access time of these data sets to speed up the visualization process. A new progressive transmission scheme using spline biorthogonal wavelet bases is proposed in this paper. By exploiting the properties of this set of wavelet bases, a fast algorithm involving only additions and subtractions is developed. Due to the multiresolutional nature of the wavelet transform, this scheme is compatible with hierarchical-structured rendering algorithms. The formula for reconstructing the functional values in a continuous volume space is given in a simple polynomial form. Lossless compression is possible, even when using floating-point numbers. This algorithm has been applied to data from a global ocean model. The lossless compression ratio is about 1.5:1. With a compression ratio of 50:1, the reconstructed data is still of good quality. Several other wavelet bases are compared with the spline biorthogonal wavelet bases. Finally the reconstructed data is visualized using various algorithms and the results are demonstrated.",
"fno": "00346332",
"keywords": [
"Data Visualisation",
"Splines Mathematics",
"Wavelet Transforms",
"Data Compression",
"Database Management Systems",
"Digital Arithmetic",
"Rendering Computer Graphics",
"Scientific Data Transmission",
"Biorthogonal Wavelet Transform",
"Scientific Visualization Systems",
"Data Management",
"Simulation",
"Storage Space",
"Access Time",
"Progressive Transmission Scheme",
"Spline Biorthogonal Wavelet Bases",
"Fast Algorithm",
"Additions",
"Subtractions",
"Hierarchical Structured Rendering Algorithm",
"Continuous Volume Space",
"Simple Polynomial Form",
"Floating Point Numbers",
"Global Ocean Model",
"Lossless Compression Ratio",
"Wavelet Transforms",
"Data Visualization",
"Continuous Wavelet Transforms",
"Spline",
"Polynomials",
"Discrete Cosine Transforms",
"Discrete Wavelet Transforms",
"Decoding",
"Filters",
"Computational Modeling"
],
"authors": [
{
"affiliation": "NSF Eng. Res. Center for Comput. Field Simulation, Mississippi Univ., MS, USA",
"fullName": "Hai Tao",
"givenName": null,
"surname": "Hai Tao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NSF Eng. Res. Center for Comput. Field Simulation, Mississippi Univ., MS, USA",
"fullName": "R.J. Moorhead",
"givenName": "R.J.",
"surname": "Moorhead",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "visual",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1994-01-01T00:00:00",
"pubType": "proceedings",
"pages": "93-99, CP9",
"year": "1994",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00346331",
"articleId": "12OmNB7cjk3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00346333",
"articleId": "12OmNqBbHPj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1997/8183/1/00648119",
"title": "Low bit rate image coding with shift orthogonal filter banks",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/00648119/12OmNAYGlAU",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcs/2017/3250/0/08035163",
"title": "An Efficient Codec for Image Compression Based on Spline Wavelet Transform and Improved SPIHT Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/hpcs/2017/08035163/12OmNAlNixP",
"parentPublication": {
"id": "proceedings/hpcs/2017/3250/0",
"title": "2017 International Conference on High-Performance Computing & Simulation (HPCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/1/73100422",
"title": "Wavelet transform matched filters for the detection and classification of microcalcifications in mammography",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100422/12OmNBuL1nr",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itng/2015/8828/0/8828a347",
"title": "Fractional Hilbert Transforms of Biorthogonal Wavelets",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2015/8828a347/12OmNC8dggj",
"parentPublication": {
"id": "proceedings/itng/2015/8828/0",
"title": "2015 12th International Conference on Information Technology - New Generations (ITNG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/2/01326413",
"title": "A new complex wavelet transform by using RI-spline wavelet",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326413/12OmNvDI3Mj",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/2",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945738",
"title": "Toward an optimal B-spline wavelet transform for image compression",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945738/12OmNwAKCKX",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2008/3494/2/3494b502",
"title": "Detection of QRS Complexes Based on Biorthogonal Spline Wavelet",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2008/3494b502/12OmNwKoZhr",
"parentPublication": {
"id": "proceedings/isise/2008/3494/2",
"title": "2008 International Symposium on Information Science and Engineering (ISISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcmp-ugc/2010/986/0/06017977",
"title": "A Comparison of Wavelet-Based Schemes for Parameter Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/hpcmp-ugc/2010/06017977/12OmNwlqhK1",
"parentPublication": {
"id": "proceedings/hpcmp-ugc/2010/986/0",
"title": "2010 DoD High Performance Computing Modernization Program Users Group Conference (HPCMP-UGC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1997/8316/2/00679102",
"title": "Biorthogonal generalization of Meyer wavelets",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1997/00679102/12OmNzZmZlH",
"parentPublication": {
"id": "proceedings/acssc/1997/8316/2",
"title": "Conference Record of the Thirty-First Asilomar Conference on Signals, Systems and Computers (Cat. No.97CB36163)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150922",
"title": "Edge detection using recursive biorthogonal wavelet transform",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150922/12OmNzdGnvT",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyKa5Tk",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBfqG5V",
"doi": "10.1109/ICME.2008.4607706",
"title": "Foreground segmentation with single reference frame using iterative likelihood estimation and graph-cut",
"normalizedTitle": "Foreground segmentation with single reference frame using iterative likelihood estimation and graph-cut",
"abstract": "This paper introduces a new foreground segmentation method. In contrast to most of the related works, our method uses only two image frames, a target frame to process, and a single reference frame. Our method first conducts simple thresholding like background subtraction, but then applies an iteration scheme we propose to estimate the pixel-wise likelihood of belonging to the foreground/background from the frame-to-frame difference. Finally, a further refinement considering edges is applied using graph-cut optimization. Experimental results show the effectiveness of our method, especially in that it keeps good performance over a wide range of the threshold value. That consistent performance will become an important step toward fully-automatic segmentation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a new foreground segmentation method. In contrast to most of the related works, our method uses only two image frames, a target frame to process, and a single reference frame. Our method first conducts simple thresholding like background subtraction, but then applies an iteration scheme we propose to estimate the pixel-wise likelihood of belonging to the foreground/background from the frame-to-frame difference. Finally, a further refinement considering edges is applied using graph-cut optimization. Experimental results show the effectiveness of our method, especially in that it keeps good performance over a wide range of the threshold value. That consistent performance will become an important step toward fully-automatic segmentation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a new foreground segmentation method. In contrast to most of the related works, our method uses only two image frames, a target frame to process, and a single reference frame. Our method first conducts simple thresholding like background subtraction, but then applies an iteration scheme we propose to estimate the pixel-wise likelihood of belonging to the foreground/background from the frame-to-frame difference. Finally, a further refinement considering edges is applied using graph-cut optimization. Experimental results show the effectiveness of our method, especially in that it keeps good performance over a wide range of the threshold value. That consistent performance will become an important step toward fully-automatic segmentation.",
"fno": "04607706",
"keywords": [
"Computer Vision",
"Image Segmentation",
"Iterative Methods",
"Maximum Likelihood Estimation",
"Foreground Segmentation",
"Single Reference Frame",
"Iterative Likelihood Estimation",
"Image Frames",
"Target Frame",
"Simple Thresholding",
"Background Subtraction",
"Iteration Scheme",
"Pixel Wise Likelihood",
"Frame To Frame Difference",
"Graph Cut Optimization",
"Fully Automatic Segmentation",
"Image Segmentation",
"Machine Vision",
"Pixel",
"Image Segmentation",
"Optimization",
"Image Color Analysis",
"Object Segmentation",
"Probability Distribution",
"Cameras",
"Image Segmentation",
"Machine Vision",
"Optimization Method"
],
"authors": [
{
"affiliation": "Information and Robot Technology Research Initiative, The University of Tokyo, 7-3-1, Hongo, Bunkyo-ku, 113-8656, Japan",
"fullName": "Keita Takahashi",
"givenName": "Keita",
"surname": "Takahashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Information and Robot Technology Research Initiative, The University of Tokyo, 7-3-1, Hongo, Bunkyo-ku, 113-8656, Japan",
"fullName": "Taketoshi Mori",
"givenName": "Taketoshi",
"surname": "Mori",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-06-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1945-7871",
"isbn": "978-1-4244-2570-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04607705",
"articleId": "12OmNyywxFY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04607707",
"articleId": "12OmNvStcIS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2011/348/0/06011853",
"title": "New frame rate up-conversion based on foreground/background segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011853/12OmNAnMuv7",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270105",
"title": "Flash Cut: Foreground Extraction with Flash and No-flash Image Pairs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270105/12OmNro0I5A",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761121",
"title": "Silhouette extraction based on iterative spatio-temporal local color transformation and graph-cut segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761121/12OmNvA1hpn",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004a750",
"title": "Efficient Foreground Segmentation Using an Image Matting Technology",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a750/12OmNvT2p3t",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761326",
"title": "Monocular video foreground segmentation system",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761326/12OmNyprnw8",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/1/01315055",
"title": "Video repairing: inference of foreground and background under severe occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315055/12OmNz61cV8",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/1",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019397",
"title": "Joint background reconstruction and foreground segmentation via a two-stage convolutional neural network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019397/12OmNzkuKzl",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07018970",
"title": "SI-Cut: Structural Inconsistency Analysis for Image Foreground Extraction",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07018970/13rRUILLkvu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d243",
"title": "Autoencoder-based background reconstruction and foreground segmentation with background noise estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d243/1L8qvYrCe8o",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/4/01699780",
"title": "Better Foreground Segmentation for Static Cameras via New Energy Form and Dynamic Graph-cut",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/01699780/1i5nhUkr2Ew",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/4",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyNQSGO",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNro0I5A",
"doi": "10.1109/CVPR.2007.383080",
"title": "Flash Cut: Foreground Extraction with Flash and No-flash Image Pairs",
"normalizedTitle": "Flash Cut: Foreground Extraction with Flash and No-flash Image Pairs",
"abstract": "In this paper, we propose a novel approach for foreground layer extraction using flash/no-flash image pairs, which we call flash cut. Flash cut is based on the simple observation that only the foreground is significantly brightened by the flash and the background appearance change is very small, if the background is distant. Changes due to flash, motion, and color information are fused in an MRF framework to produce high quality segmentation results. Flash cut handles some amount of camera shake, and foreground motion, which makes it practical for anyone with a flash-equipped camera to use. We validate our approach on a variety of indoor and outdoor examples.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a novel approach for foreground layer extraction using flash/no-flash image pairs, which we call flash cut. Flash cut is based on the simple observation that only the foreground is significantly brightened by the flash and the background appearance change is very small, if the background is distant. Changes due to flash, motion, and color information are fused in an MRF framework to produce high quality segmentation results. Flash cut handles some amount of camera shake, and foreground motion, which makes it practical for anyone with a flash-equipped camera to use. We validate our approach on a variety of indoor and outdoor examples.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a novel approach for foreground layer extraction using flash/no-flash image pairs, which we call flash cut. Flash cut is based on the simple observation that only the foreground is significantly brightened by the flash and the background appearance change is very small, if the background is distant. Changes due to flash, motion, and color information are fused in an MRF framework to produce high quality segmentation results. Flash cut handles some amount of camera shake, and foreground motion, which makes it practical for anyone with a flash-equipped camera to use. We validate our approach on a variety of indoor and outdoor examples.",
"fno": "04270105",
"keywords": [
"Feature Extraction",
"Image Segmentation",
"Flash Cut",
"Foreground Extraction",
"Image Pairs",
"Image Segmentation",
"Cameras",
"Layout",
"Stereo Vision",
"Image Edge Detection",
"Asia",
"Data Mining",
"Computer Vision",
"Infrared Imaging",
"Robustness"
],
"authors": [
{
"affiliation": "Xi'an Jiaotong University, Xi'an, P.R. China",
"fullName": "Jian Sun",
"givenName": "Jian",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Asia, Beijing, P.R. China",
"fullName": "Jian Sun",
"givenName": "Jian",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research, Redmond, WA, USA",
"fullName": "Sing Bing Kang",
"givenName": "Sing Bing",
"surname": "Kang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi'an Jiaotong University, Xi'an, P.R. China",
"fullName": "Zong-Ben Xu",
"givenName": "Zong-Ben",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Asia, Beijing, P.R. China",
"fullName": "Xiaoou Tang",
"givenName": "Xiaoou",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Asia, Beijing, P.R. China",
"fullName": "Heung-Yeung Shum",
"givenName": "Heung-Yeung",
"surname": "Shum",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2007",
"issn": "1063-6919",
"isbn": "1-4244-1179-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04270092",
"articleId": "12OmNyRPgGk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04270093",
"articleId": "12OmNwekjDp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2008/2570/0/04607706",
"title": "Foreground segmentation with single reference frame using iterative likelihood estimation and graph-cut",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607706/12OmNBfqG5V",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457721",
"title": "Min-cut based segmentation of point clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457721/12OmNvA1hkT",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a375",
"title": "Mutual Foreground Segmentation with Multispectral Stereo Pairs",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a375/12OmNwLfMAP",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a022",
"title": "Saliency Cut in Stereo Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a022/12OmNxwWoGA",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761326",
"title": "Monocular video foreground segmentation system",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761326/12OmNyprnw8",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/044P1A44",
"title": "Robust stereo with flash and no-flash image pairs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/044P1A44/12OmNyr8Yx7",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07018970",
"title": "SI-Cut: Structural Inconsistency Analysis for Image Foreground Extraction",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07018970/13rRUILLkvu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2005/2372/2/01467585",
"title": "Bi-layer segmentation of binocular stereo video",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2005/01467585/1htC6f7frVK",
"parentPublication": {
"id": "proceedings/cvpr/2005/2372/2",
"title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/4/01699780",
"title": "Better Foreground Segmentation for Static Cameras via New Energy Form and Dynamic Graph-cut",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/01699780/1i5nhUkr2Ew",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/4",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c063",
"title": "Deep Denoising of Flash and No-Flash Pairs for Photography in Low-Light Environments",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c063/1yeJtBvC3FC",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyaXPPU",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNscOUe7",
"doi": "10.1109/ICMEW.2015.7169759",
"title": "Improving DIBR technique to resolve foreground color/depth edge misalignment",
"normalizedTitle": "Improving DIBR technique to resolve foreground color/depth edge misalignment",
"abstract": "In this paper, we propose a method to overcome flaws in the DIBR-synthesized view due to foreground color/depth edge misalignment. Our method is an improvement of the prior work [1], which is based on background sprite model construction for hole-filling. First, the possible flaw areas in the original view are detected and then an alpha matting technique is used to combine two information sources from the foreground and the background to improve the quality of the synthesized view. Experimental results show that our proposed method is capable of resolving such a kind of flaws regardless of complex or simple background, thus making our view synthesis result close to those synthesized with ideal or flawless foreground depths.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a method to overcome flaws in the DIBR-synthesized view due to foreground color/depth edge misalignment. Our method is an improvement of the prior work [1], which is based on background sprite model construction for hole-filling. First, the possible flaw areas in the original view are detected and then an alpha matting technique is used to combine two information sources from the foreground and the background to improve the quality of the synthesized view. Experimental results show that our proposed method is capable of resolving such a kind of flaws regardless of complex or simple background, thus making our view synthesis result close to those synthesized with ideal or flawless foreground depths.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a method to overcome flaws in the DIBR-synthesized view due to foreground color/depth edge misalignment. Our method is an improvement of the prior work [1], which is based on background sprite model construction for hole-filling. First, the possible flaw areas in the original view are detected and then an alpha matting technique is used to combine two information sources from the foreground and the background to improve the quality of the synthesized view. Experimental results show that our proposed method is capable of resolving such a kind of flaws regardless of complex or simple background, thus making our view synthesis result close to those synthesized with ideal or flawless foreground depths.",
"fno": "07169759",
"keywords": [
"Image Color Analysis",
"Silicon",
"Sprites Computer",
"Alpha Matting",
"DIBR",
"View Synthesis",
"Hole Filling"
],
"authors": [
{
"affiliation": "Department of Electrical Engineering, National Chung Cheng University, Chia-Yi, Taiwan, ROC",
"fullName": "Wen-Nung Lie",
"givenName": null,
"surname": "Wen-Nung Lie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "AIM-HI, National Chung Cheng University, Chia-Yi, Taiwan, ROC",
"fullName": "Chun-Cheng Yeh",
"givenName": null,
"surname": "Chun-Cheng Yeh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Computer Science and Information Engineering, Da-Yeh University, Taiwan",
"fullName": "Guo-Shiang Lin",
"givenName": null,
"surname": "Guo-Shiang Lin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1-5",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-7079-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07169758",
"articleId": "12OmNxWLTsK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07169760",
"articleId": "12OmNwpoFEZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2012/4711/0/4711a339",
"title": "Foreground-Object-Protected Depth Map Smoothing for DIBR",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a339/12OmNBUS78r",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2016/1552/0/07574740",
"title": "Hole-filling for single-view plus-depth based rendering with temporal texture synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2016/07574740/12OmNqBbHwM",
"parentPublication": {
"id": "proceedings/icmew/2016/1552/0",
"title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169773",
"title": "A disocclusion filling method using multiple sprites with depth for virtual view synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169773/12OmNs0C9Ua",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890181",
"title": "Dictionary based hole filling with assistance of depth",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890181/12OmNvDZF5x",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004a750",
"title": "Efficient Foreground Segmentation Using an Image Matting Technology",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a750/12OmNvT2p3t",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460246",
"title": "Automatic segmentation fusing color and depth",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460246/12OmNvTBB00",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/event/2001/1293/0/12930003",
"title": "Foreground Segmentation Using Adaptive Mixture Models in Color and Depth",
"doi": null,
"abstractUrl": "/proceedings-article/event/2001/12930003/12OmNyKa6g8",
"parentPublication": {
"id": "proceedings/event/2001/1293/0",
"title": "Proceedings IEEE Workshop on Detection and Recognition of Events in Video",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/07/07018970",
"title": "SI-Cut: Structural Inconsistency Analysis for Image Foreground Extraction",
"doi": null,
"abstractUrl": "/journal/tg/2015/07/07018970/13rRUILLkvu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/06/08642935",
"title": "A Disocclusion Inpainting Framework for Depth-Based View Synthesis",
"doi": null,
"abstractUrl": "/journal/tp/2020/06/08642935/17PYElAbxtK",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2020/4272/0/427200a135",
"title": "No-Reference Quality Prediction for DIBR-Synthesized Images Using Statistics of Fused Color-Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2020/427200a135/1mAa1YjLS2Q",
"parentPublication": {
"id": "proceedings/mipr/2020/4272/0",
"title": "2020 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx7ouUM",
"title": "2013 International Conference on Computational and Information Sciences",
"acronym": "iccis",
"groupId": "1800262",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvT2p3t",
"doi": "10.1109/ICCIS.2013.202",
"title": "Efficient Foreground Segmentation Using an Image Matting Technology",
"normalizedTitle": "Efficient Foreground Segmentation Using an Image Matting Technology",
"abstract": "Efficient segmentation of foreground moving objects is an important procedure to achieve stable object tracking and recognition. In this paper, we have developed a novel algorithm for foreground segmentation that can extract moving objects from background accurately and efficiently. In our proposed algorithm, Gaussian Mixture Model is first used to model the static background regions. The boundary box of the foreground regions is determined via inter-frame change detection and SIFT feature analysis, and Grabcut algorithm (an image matting technology) is then used to obtain the optimal segmentation of foreground moving objects. Experiments on a set of video clips with huge diverse scenes have demonstrated the efficiency of our proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Efficient segmentation of foreground moving objects is an important procedure to achieve stable object tracking and recognition. In this paper, we have developed a novel algorithm for foreground segmentation that can extract moving objects from background accurately and efficiently. In our proposed algorithm, Gaussian Mixture Model is first used to model the static background regions. The boundary box of the foreground regions is determined via inter-frame change detection and SIFT feature analysis, and Grabcut algorithm (an image matting technology) is then used to obtain the optimal segmentation of foreground moving objects. Experiments on a set of video clips with huge diverse scenes have demonstrated the efficiency of our proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Efficient segmentation of foreground moving objects is an important procedure to achieve stable object tracking and recognition. In this paper, we have developed a novel algorithm for foreground segmentation that can extract moving objects from background accurately and efficiently. In our proposed algorithm, Gaussian Mixture Model is first used to model the static background regions. The boundary box of the foreground regions is determined via inter-frame change detection and SIFT feature analysis, and Grabcut algorithm (an image matting technology) is then used to obtain the optimal segmentation of foreground moving objects. Experiments on a set of video clips with huge diverse scenes have demonstrated the efficiency of our proposed method.",
"fno": "5004a750",
"keywords": [
"Hidden Markov Models",
"Image Color Analysis",
"Image Segmentation",
"Computer Vision",
"Lighting",
"Feature Extraction",
"Object Segmentation",
"Grab Cut",
"Foreground Segmentation",
"Gaussian Mixture Model GMM",
"HSV",
"SIFT"
],
"authors": [
{
"affiliation": null,
"fullName": "Xuchao Gong",
"givenName": "Xuchao",
"surname": "Gong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zongmin Li",
"givenName": "Zongmin",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-06-01T00:00:00",
"pubType": "proceedings",
"pages": "750-753",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5004-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5004a746",
"articleId": "12OmNzn38XM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5004a754",
"articleId": "12OmNC1Y5o5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2009/3583/1/3583a473",
"title": "Foreground Object Segmentation from Dense Multi-view Images",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583a473/12OmNC1Gugp",
"parentPublication": {
"id": "proceedings/icmtma/2009/3583/3",
"title": "2009 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a375",
"title": "Mutual Foreground Segmentation with Multispectral Stereo Pairs",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a375/12OmNwLfMAP",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/1/01394252",
"title": "An automatic segmentation algorithm for moving objects in video sequences under multi-constraints",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394252/12OmNweTvLE",
"parentPublication": {
"id": "proceedings/icme/2004/8603/1",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118d166",
"title": "Object-Based Multiple Foreground Video Co-segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118d166/12OmNx38vMn",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2010/7801/0/05483922",
"title": "Segmentation-tracking feedback approach for high-performance video surveillance applications",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2010/05483922/12OmNxdDFHf",
"parentPublication": {
"id": "proceedings/ssiai/2010/7801/0",
"title": "2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019397",
"title": "Joint background reconstruction and foreground segmentation via a two-stage convolutional neural network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019397/12OmNzkuKzl",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2002/09/i1291",
"title": "An HMM-Based Segmentation Method for Traffic Monitoring Movies",
"doi": null,
"abstractUrl": "/journal/tp/2002/09/i1291/13rRUxASuwe",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2006/02/i0279",
"title": "A Dynamic Conditional Random Field Model for Foreground and Shadow Segmentation",
"doi": null,
"abstractUrl": "/journal/tp/2006/02/i0279/13rRUyuegi7",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b770",
"title": "FgGAN: A Cascaded Unpaired Learning for Background Estimation and Foreground Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b770/18j8JRpzQpa",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d243",
"title": "Autoencoder-based background reconstruction and foreground segmentation with background noise estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d243/1L8qvYrCe8o",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4ivU",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "2",
"displayVolume": "2",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxSNvr2",
"doi": "10.1109/ICPR.2000.906106",
"title": "Foreground-Background Segmentation by Cellular Neural Networks",
"normalizedTitle": "Foreground-Background Segmentation by Cellular Neural Networks",
"abstract": "A common procedure in digital postproduction is rotoscoping, the segmentation of independently moving foreground elements from background in a sequence of images. Still often carried out manually, rotoscoping is time-consuming and requires great skill in determining the boundary between foreground and background. Errors lead to a bubbling artifact in the final composited sequence. The industry is interested in automated rotoscoping. Any automatic segmentation method must correctly locate the boundary and be robust given rapid motion and non-static backgrounds. A cellular neural network for segmentation is presented that labels pixels by color, estimated motion and neighboring labels. The method is accurate, laborsaving and many times faster than manual rotoscoping.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A common procedure in digital postproduction is rotoscoping, the segmentation of independently moving foreground elements from background in a sequence of images. Still often carried out manually, rotoscoping is time-consuming and requires great skill in determining the boundary between foreground and background. Errors lead to a bubbling artifact in the final composited sequence. The industry is interested in automated rotoscoping. Any automatic segmentation method must correctly locate the boundary and be robust given rapid motion and non-static backgrounds. A cellular neural network for segmentation is presented that labels pixels by color, estimated motion and neighboring labels. The method is accurate, laborsaving and many times faster than manual rotoscoping.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A common procedure in digital postproduction is rotoscoping, the segmentation of independently moving foreground elements from background in a sequence of images. Still often carried out manually, rotoscoping is time-consuming and requires great skill in determining the boundary between foreground and background. Errors lead to a bubbling artifact in the final composited sequence. The industry is interested in automated rotoscoping. Any automatic segmentation method must correctly locate the boundary and be robust given rapid motion and non-static backgrounds. A cellular neural network for segmentation is presented that labels pixels by color, estimated motion and neighboring labels. The method is accurate, laborsaving and many times faster than manual rotoscoping.",
"fno": "07502438",
"keywords": [
"Segmentation",
"Cellular Neural Networks",
"Motion",
"Color"
],
"authors": [
{
"affiliation": "Kingston University",
"fullName": "P.R. Giaccone",
"givenName": "P.R.",
"surname": "Giaccone",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kingston University",
"fullName": "D. Tsaptsinos",
"givenName": "D.",
"surname": "Tsaptsinos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kingston University",
"fullName": "G.A. Jones",
"givenName": "G.A.",
"surname": "Jones",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-09-01T00:00:00",
"pubType": "proceedings",
"pages": "2438",
"year": "2000",
"issn": null,
"isbn": "0-7695-0750-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07502434",
"articleId": "12OmNyvY9xd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07502442",
"articleId": "12OmNx5Yvc2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx8wTfL",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyprnw8",
"doi": "10.1109/ICPR.2008.4761326",
"title": "Monocular video foreground segmentation system",
"normalizedTitle": "Monocular video foreground segmentation system",
"abstract": "This paper proposes an automatic foreground segmentation system based on Gaussian mixture models and dynamic graph cut algorithm. An adaptive per-pixel background model is developed to set the data cost of an image graph. Shadow detection which is important to the foreground segmentation is introduced in this paper. A boundary smoothing algorithm is presented so as to eliminate edge aliasing and composite a foreground object onto a new background seamlessly. We verify our algorithm in different video sequences including some public video sequences. Experimental results demonstrate the efficiency of our proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes an automatic foreground segmentation system based on Gaussian mixture models and dynamic graph cut algorithm. An adaptive per-pixel background model is developed to set the data cost of an image graph. Shadow detection which is important to the foreground segmentation is introduced in this paper. A boundary smoothing algorithm is presented so as to eliminate edge aliasing and composite a foreground object onto a new background seamlessly. We verify our algorithm in different video sequences including some public video sequences. Experimental results demonstrate the efficiency of our proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes an automatic foreground segmentation system based on Gaussian mixture models and dynamic graph cut algorithm. An adaptive per-pixel background model is developed to set the data cost of an image graph. Shadow detection which is important to the foreground segmentation is introduced in this paper. A boundary smoothing algorithm is presented so as to eliminate edge aliasing and composite a foreground object onto a new background seamlessly. We verify our algorithm in different video sequences including some public video sequences. Experimental results demonstrate the efficiency of our proposed method.",
"fno": "04761326",
"keywords": [
"Gaussian Processes",
"Graph Theory",
"Image Segmentation",
"Minimisation",
"Object Detection",
"Smoothing Methods",
"Video Signal Processing",
"Monocular Video Foreground Segmentation System",
"Gaussian Mixture Model",
"Dynamic Graph Cut Algorithm",
"Adaptive Per Pixel Background Model",
"Shadow Detection",
"Boundary Smoothing Algorithm",
"Edge Aliasing Elimination",
"Video Sequence",
"Minimization",
"Costs",
"Image Segmentation",
"Videoconference",
"Video Sequences",
"Pixel",
"Smoothing Methods",
"Automation",
"Heuristic Algorithms",
"Image Edge Detection",
"Streaming Media"
],
"authors": [
{
"affiliation": "Institute of Automation, Chinese Academy of Sciences, China",
"fullName": "Xiaoyu Wu",
"givenName": null,
"surname": "Xiaoyu Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Automation, Chinese Academy of Sciences, China",
"fullName": "Yangsheng Wang",
"givenName": null,
"surname": "Yangsheng Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Automation, Chinese Academy of Sciences, China",
"fullName": "Xiaolong Zheng",
"givenName": null,
"surname": "Xiaolong Zheng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1051-4651",
"isbn": "978-1-4244-2174-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04761325",
"articleId": "12OmNvkGWaH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04761327",
"articleId": "12OmNzmLxPH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2008/2570/0/04607694",
"title": "Bi-layer segmentation from stereo video sequences by fusing multiple cues",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607694/12OmNANkoba",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2009/3883/0/3883a288",
"title": "An Effective Method for Foreground Segmentation of Video",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2009/3883a288/12OmNBCHMMb",
"parentPublication": {
"id": "proceedings/icig/2009/3883/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607706",
"title": "Foreground segmentation with single reference frame using iterative likelihood estimation and graph-cut",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607706/12OmNBfqG5V",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmvc/2007/2793/0/27930005",
"title": "Monocular Video Foreground/Background Segmentation by Tracking Spatial-Color Gaussian Mixture Models",
"doi": null,
"abstractUrl": "/proceedings-article/wmvc/2007/27930005/12OmNC943Pc",
"parentPublication": {
"id": "proceedings/wmvc/2007/2793/0",
"title": "Motion and Video Computing, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a375",
"title": "Mutual Foreground Segmentation with Multispectral Stereo Pairs",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a375/12OmNwLfMAP",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118d166",
"title": "Object-Based Multiple Foreground Video Co-segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118d166/12OmNx38vMn",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/1/252110492",
"title": "Video Foreground Segmentation Based on Sequential Feature Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252110492/12OmNy68EBD",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019397",
"title": "Joint background reconstruction and foreground segmentation via a two-stage convolutional neural network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019397/12OmNzkuKzl",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2005/2372/2/01467585",
"title": "Bi-layer segmentation of binocular stereo video",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2005/01467585/1htC6f7frVK",
"parentPublication": {
"id": "proceedings/cvpr/2005/2372/2",
"title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/4/01699780",
"title": "Better Foreground Segmentation for Static Cameras via New Energy Form and Dynamic Graph-cut",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/01699780/1i5nhUkr2Ew",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/4",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx3q6UI",
"title": "2016 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"acronym": "ssiai",
"groupId": "1000345",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzdoMCe",
"doi": "10.1109/SSIAI.2016.7459195",
"title": "An unsupervised object-level image segmentation method based on foreground and background priors",
"normalizedTitle": "An unsupervised object-level image segmentation method based on foreground and background priors",
"abstract": "Conventional unsupervised image segmentation methods return many superpixels or object parts and thus tend to over-segmentation. In this paper, we present a novel post-processing approach for unsupervised object-level image segmentation (UnOLIS). Starting with the results of any conventional unsupervised segmentation method, we first combine a global region-based saliency and a robust background feature to cluster the pre-segmented regions into foreground and background. We then design a region growing process, encoded with several object priors, to generate a high quality foreground object segmentation. In parallel, we group the background regions into different stuffs by clustering. We test our method on the Berkeley Segmentation Dataset (BSDS500). Our approach significantly improves conventional unsupervised segmentation methods and achieves almost comparable results as the state-of-the-art supervised image segmentation methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Conventional unsupervised image segmentation methods return many superpixels or object parts and thus tend to over-segmentation. In this paper, we present a novel post-processing approach for unsupervised object-level image segmentation (UnOLIS). Starting with the results of any conventional unsupervised segmentation method, we first combine a global region-based saliency and a robust background feature to cluster the pre-segmented regions into foreground and background. We then design a region growing process, encoded with several object priors, to generate a high quality foreground object segmentation. In parallel, we group the background regions into different stuffs by clustering. We test our method on the Berkeley Segmentation Dataset (BSDS500). Our approach significantly improves conventional unsupervised segmentation methods and achieves almost comparable results as the state-of-the-art supervised image segmentation methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Conventional unsupervised image segmentation methods return many superpixels or object parts and thus tend to over-segmentation. In this paper, we present a novel post-processing approach for unsupervised object-level image segmentation (UnOLIS). Starting with the results of any conventional unsupervised segmentation method, we first combine a global region-based saliency and a robust background feature to cluster the pre-segmented regions into foreground and background. We then design a region growing process, encoded with several object priors, to generate a high quality foreground object segmentation. In parallel, we group the background regions into different stuffs by clustering. We test our method on the Berkeley Segmentation Dataset (BSDS500). Our approach significantly improves conventional unsupervised segmentation methods and achieves almost comparable results as the state-of-the-art supervised image segmentation methods.",
"fno": "07459195",
"keywords": [
"Image Segmentation",
"Image Color Analysis",
"Image Edge Detection",
"Robustness",
"Training",
"Organizations",
"Measurement"
],
"authors": [
{
"affiliation": "Institute of Signal Processing and System Theory, University of Stuttgart, Germany",
"fullName": "Chunlai Wang",
"givenName": "Chunlai",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Signal Processing and System Theory, University of Stuttgart, Germany",
"fullName": "Bin Yang",
"givenName": "Bin",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ssiai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "141-144",
"year": "2016",
"issn": null,
"isbn": "978-1-4673-9919-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07459194",
"articleId": "12OmNBLdKSb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07459196",
"articleId": "12OmNy3Agx4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sitis/2014/7978/0/7978a119",
"title": "Foreground-Background Segmentation Based on Codebook and Edge Detector",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2014/7978a119/12OmNAJ4piS",
"parentPublication": {
"id": "proceedings/sitis/2014/7978/0",
"title": "2014 Tenth International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004a750",
"title": "Efficient Foreground Segmentation Using an Image Matting Technology",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a750/12OmNvT2p3t",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118d166",
"title": "Object-Based Multiple Foreground Video Co-segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118d166/12OmNx38vMn",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/1/73100246",
"title": "Foreground/background segmentation of color images by integration of multiple cues",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100246/12OmNxEjXXE",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2000/0750/2/07502438",
"title": "Foreground-Background Segmentation by Cellular Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2000/07502438/12OmNxSNvr2",
"parentPublication": {
"id": "proceedings/icpr/2000/0750/2",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761326",
"title": "Monocular video foreground segmentation system",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761326/12OmNyprnw8",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a366",
"title": "Learning Foreground-Background Segmentation from Improved Layered GANs",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a366/1B13wNAGYQE",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3950",
"title": "Labels4Free: Unsupervised Segmentation using StyleGAN",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3950/1BmEZrG8UAE",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d243",
"title": "Autoencoder-based background reconstruction and foreground segmentation with background noise estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d243/1L8qvYrCe8o",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/07/09340560",
"title": "Unsupervised Disentanglement of Pose, Appearance and Background from Images and Videos",
"doi": null,
"abstractUrl": "/journal/tp/2022/07/09340560/1qMJODoYi40",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxFJXGd",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzkuKzl",
"doi": "10.1109/ICME.2017.8019397",
"title": "Joint background reconstruction and foreground segmentation via a two-stage convolutional neural network",
"normalizedTitle": "Joint background reconstruction and foreground segmentation via a two-stage convolutional neural network",
"abstract": "Foreground segmentation in video sequences is a classic topic in computer vision. Due to the lack of semantic and prior knowledge, it is difficult for existing methods to deal with sophisticated scenes well. Therefore, in this paper, we propose an end-to-end two-stage deep convolutional neural network (CNN) framework for foreground segmentation in video sequences. In the first stage, a convolutional encoder-decoder sub-network is employed to reconstruct the background images and encode rich prior knowledge of background scenes. In the second stage, the reconstructed background and current frame are input into a multi-channel fully-convolutional sub-network (MCFCN) for accurate foreground segmentation. In the two-stage CNN, the reconstruction loss and segmentation loss are jointly optimized. The background images and foreground objects are output simultaneously in an end-to-end way. Moreover, by incorporating the prior semantic knowledge of foreground and background in the pre-training process, our method could restrain the background noise and keep the integrity of foreground objects at the same time. Experiments on CDNet 2014 show that our method outperforms the state-of-the-art by 4.9%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Foreground segmentation in video sequences is a classic topic in computer vision. Due to the lack of semantic and prior knowledge, it is difficult for existing methods to deal with sophisticated scenes well. Therefore, in this paper, we propose an end-to-end two-stage deep convolutional neural network (CNN) framework for foreground segmentation in video sequences. In the first stage, a convolutional encoder-decoder sub-network is employed to reconstruct the background images and encode rich prior knowledge of background scenes. In the second stage, the reconstructed background and current frame are input into a multi-channel fully-convolutional sub-network (MCFCN) for accurate foreground segmentation. In the two-stage CNN, the reconstruction loss and segmentation loss are jointly optimized. The background images and foreground objects are output simultaneously in an end-to-end way. Moreover, by incorporating the prior semantic knowledge of foreground and background in the pre-training process, our method could restrain the background noise and keep the integrity of foreground objects at the same time. Experiments on CDNet 2014 show that our method outperforms the state-of-the-art by 4.9%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Foreground segmentation in video sequences is a classic topic in computer vision. Due to the lack of semantic and prior knowledge, it is difficult for existing methods to deal with sophisticated scenes well. Therefore, in this paper, we propose an end-to-end two-stage deep convolutional neural network (CNN) framework for foreground segmentation in video sequences. In the first stage, a convolutional encoder-decoder sub-network is employed to reconstruct the background images and encode rich prior knowledge of background scenes. In the second stage, the reconstructed background and current frame are input into a multi-channel fully-convolutional sub-network (MCFCN) for accurate foreground segmentation. In the two-stage CNN, the reconstruction loss and segmentation loss are jointly optimized. The background images and foreground objects are output simultaneously in an end-to-end way. Moreover, by incorporating the prior semantic knowledge of foreground and background in the pre-training process, our method could restrain the background noise and keep the integrity of foreground objects at the same time. Experiments on CDNet 2014 show that our method outperforms the state-of-the-art by 4.9%.",
"fno": "08019397",
"keywords": [
"Image Reconstruction",
"Image Segmentation",
"Semantics",
"Motion Segmentation",
"Image Restoration",
"Training",
"Foreground Segmentation",
"Background Modeling",
"Convolutional Neural Network"
],
"authors": [
{
"affiliation": "National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China, 100190",
"fullName": "Xu Zhao",
"givenName": "Xu",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China, 100190",
"fullName": "Yingying Chen",
"givenName": "Yingying",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China, 100190",
"fullName": "Ming Tang",
"givenName": "Ming",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China, 100190",
"fullName": "Jinqiao Wang",
"givenName": "Jinqiao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "343-348",
"year": "2017",
"issn": "1945-788X",
"isbn": "978-1-5090-6067-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08019396",
"articleId": "12OmNs0TKUH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08019398",
"articleId": "12OmNs0TKGF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2016/3811/0/07738024",
"title": "A two-stage foreground propagation for moving object detection in a non-stationary",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2016/07738024/12OmNCctfc7",
"parentPublication": {
"id": "proceedings/avss/2016/3811/0",
"title": "2016 13th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apcip/2009/3699/2/3699b214",
"title": "Robust Foreground Segmentation Using Subspace Based Background Model",
"doi": null,
"abstractUrl": "/proceedings-article/apcip/2009/3699b214/12OmNqJq4Bf",
"parentPublication": {
"id": "proceedings/apcip/2009/3699/1",
"title": "Information Processing, Asia-Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/is3c/2012/4655/0/4655a902",
"title": "Image Segmentation Using Proportion of Foreground to Background Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/is3c/2012/4655a902/12OmNvjyxyF",
"parentPublication": {
"id": "proceedings/is3c/2012/4655/0",
"title": "Computer, Consumer and Control, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a375",
"title": "Mutual Foreground Segmentation with Multispectral Stereo Pairs",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a375/12OmNwLfMAP",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2000/0750/2/07502438",
"title": "Foreground-Background Segmentation by Cellular Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2000/07502438/12OmNxSNvr2",
"parentPublication": {
"id": "proceedings/icpr/2000/0750/2",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/1/01315055",
"title": "Video repairing: inference of foreground and background under severe occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315055/12OmNz61cV8",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/1",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118a368",
"title": "Joint Motion Segmentation and Background Estimation in Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118a368/12OmNzBOi34",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2009/3641/0/3641a599",
"title": "Dynamic Background Modeling for Foreground Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2009/3641a599/12OmNzJbR3u",
"parentPublication": {
"id": "proceedings/icis/2009/3641/0",
"title": "Computer and Information Science, ACIS International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b770",
"title": "FgGAN: A Cascaded Unpaired Learning for Background Estimation and Foreground Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b770/18j8JRpzQpa",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d243",
"title": "Autoencoder-based background reconstruction and foreground segmentation with background noise estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d243/1L8qvYrCe8o",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1uqGdWlamUo",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1uqGo1cwPLO",
"doi": "10.1109/WACV48630.2021.00369",
"title": "Fine-grained Foreground Retrieval via Teacher-Student Learning",
"normalizedTitle": "Fine-grained Foreground Retrieval via Teacher-Student Learning",
"abstract": "Foreground image retrieval is a challenging computer vision task. Given a background scene image with a bounding box indicating a target location, the goal is to retrieve a set of images of foreground objects from a given category, which are semantically compatible with the background. We formulate foreground retrieval as a self-supervised domain adaptation task, where the source domain consists of foreground images and the target domain of background images. Specifically, given pretrained object feature extraction networks that serve as teachers, we train a student network to infer compatible foreground features from background images. Thus, foregrounds and backgrounds are effectively mapped into a common feature space, enabling retrieval of the foregrounds that are closest to the target background in that space. A notable feature of our approach is that our training strategy does not require instance segmentation, unlike current state-of-the-art methods. Thus, our method may be applied to diverse foreground categories and background scene types and enables us to retrieve the foreground in a fine-grained manner, which is closer to the requirements of real world applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Foreground image retrieval is a challenging computer vision task. Given a background scene image with a bounding box indicating a target location, the goal is to retrieve a set of images of foreground objects from a given category, which are semantically compatible with the background. We formulate foreground retrieval as a self-supervised domain adaptation task, where the source domain consists of foreground images and the target domain of background images. Specifically, given pretrained object feature extraction networks that serve as teachers, we train a student network to infer compatible foreground features from background images. Thus, foregrounds and backgrounds are effectively mapped into a common feature space, enabling retrieval of the foregrounds that are closest to the target background in that space. A notable feature of our approach is that our training strategy does not require instance segmentation, unlike current state-of-the-art methods. Thus, our method may be applied to diverse foreground categories and background scene types and enables us to retrieve the foreground in a fine-grained manner, which is closer to the requirements of real world applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Foreground image retrieval is a challenging computer vision task. Given a background scene image with a bounding box indicating a target location, the goal is to retrieve a set of images of foreground objects from a given category, which are semantically compatible with the background. We formulate foreground retrieval as a self-supervised domain adaptation task, where the source domain consists of foreground images and the target domain of background images. Specifically, given pretrained object feature extraction networks that serve as teachers, we train a student network to infer compatible foreground features from background images. Thus, foregrounds and backgrounds are effectively mapped into a common feature space, enabling retrieval of the foregrounds that are closest to the target background in that space. A notable feature of our approach is that our training strategy does not require instance segmentation, unlike current state-of-the-art methods. Thus, our method may be applied to diverse foreground categories and background scene types and enables us to retrieve the foreground in a fine-grained manner, which is closer to the requirements of real world applications.",
"fno": "047700d645",
"keywords": [
"Computer Vision",
"Feature Extraction",
"Image Representation",
"Image Retrieval",
"Image Segmentation",
"Learning Artificial Intelligence",
"Object Detection",
"Teacher Student Learning",
"Foreground Image Retrieval",
"Challenging Computer Vision Task",
"Background Scene Image",
"Target Location",
"Foreground Objects",
"Given Category",
"Foreground Retrieval",
"Self Supervised Domain Adaptation Task",
"Source Domain",
"Foreground Images",
"Target Domain",
"Background Images",
"Given Pretrained Object Feature Extraction Networks",
"Student Network",
"Compatible Foreground Features",
"Common Feature Space",
"Target Background",
"Notable Feature",
"Diverse Foreground Categories",
"Background Scene Types",
"Fine Grained Manner",
"Training",
"Computer Vision",
"Adaptation Models",
"Image Segmentation",
"Semantics",
"Image Retrieval",
"Object Detection"
],
"authors": [
{
"affiliation": "Hebrew University",
"fullName": "Zongze Wu",
"givenName": "Zongze",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hebrew University",
"fullName": "Dani Lischinski",
"givenName": "Dani",
"surname": "Lischinski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Eli Shechtman",
"givenName": "Eli",
"surname": "Shechtman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "3645-3653",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0477-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "047700d635",
"articleId": "1uqGmpxUbtu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "047700d654",
"articleId": "1uqGHbgTlVS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2014/4761/0/06890207",
"title": "Speeding uplow rank matrix recovery for foreground separation in surveillance videos",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890207/12OmNALlcgr",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/1/07294828",
"title": "Edge-based foreground detection with higher order derivative Local Binary Patterns for low-resolution video processing",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07294828/12OmNAfy7Kl",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/1",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-cg/2005/2473/0/24730509",
"title": "Foreground-Distortion Method for Image Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cad-cg/2005/24730509/12OmNAlvHPU",
"parentPublication": {
"id": "proceedings/cad-cg/2005/2473/0",
"title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004a750",
"title": "Efficient Foreground Segmentation Using an Image Matting Technology",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a750/12OmNvT2p3t",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a375",
"title": "Mutual Foreground Segmentation with Multispectral Stereo Pairs",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a375/12OmNwLfMAP",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fit/2013/2503/0/2293a013",
"title": "Foreground Object Detection and Tracking for Visual Surveillance System: A Hybrid Approach",
"doi": null,
"abstractUrl": "/proceedings-article/fit/2013/2293a013/12OmNxiKrWU",
"parentPublication": {
"id": "proceedings/fit/2013/2503/0",
"title": "2013 11th International Conference on Frontiers of Information Technology (FIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/106O1D02",
"title": "On multiple foreground cosegmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/106O1D02/12OmNymjMZL",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv-motion/2005/2271/2/227120020",
"title": "Multiplicative Background-Foreground Estimation Under Uncontrolled Illumination using Intrinsic Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv-motion/2005/227120020/12OmNzV70ne",
"parentPublication": {
"id": "proceedings/wacv-motion/2005/2271/2",
"title": "Applications of Computer Vision and the IEEE Workshop on Motion and Video Computing, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019397",
"title": "Joint background reconstruction and foreground segmentation via a two-stage convolutional neural network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019397/12OmNzkuKzl",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428086",
"title": "Robust Cross-Scene Foreground Segmentation in Surveillance Video",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428086/1uilYWHYAus",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzBOibR",
"title": "2017 International Conference on Vision, Image and Signal Processing (ICVISP)",
"acronym": "icvisp",
"groupId": "1823144",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwekjvI",
"doi": "10.1109/ICVISP.2017.16",
"title": "Image Denoising Based on the Wavelet Semi-soft Threshold and Total Variation",
"normalizedTitle": "Image Denoising Based on the Wavelet Semi-soft Threshold and Total Variation",
"abstract": "The wavelet threshold denoising method has some defects. For example, the hard threshold function has no continuity at the threshold, which causes the Gibbs ringing effect. The soft threshold is relatively smooth, but the image is blurred. Image denoising based on total variation (TV) can effectively preserve the edge detail of the image, but in the smooth area, the denoising effect is not good. In this paper, a total variation image denoising method based on the wavelet semi-soft threshold is proposed. First, the image is decomposed using the wavelet method and the semi-soft threshold method is used to denoise in the high layer. Then, the wavelet coefficients are used to reconstruct the image. The high-frequency components of the first layer are denoised using the total variation method. The wavelet coefficients of the layers reconstruct the image after denoising. The experimental results demonstrate that the proposed method has a higher PSNR (Peak signal to noise ratio) than other methods, and it can more effectively preserve image detail while the image is denoised.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The wavelet threshold denoising method has some defects. For example, the hard threshold function has no continuity at the threshold, which causes the Gibbs ringing effect. The soft threshold is relatively smooth, but the image is blurred. Image denoising based on total variation (TV) can effectively preserve the edge detail of the image, but in the smooth area, the denoising effect is not good. In this paper, a total variation image denoising method based on the wavelet semi-soft threshold is proposed. First, the image is decomposed using the wavelet method and the semi-soft threshold method is used to denoise in the high layer. Then, the wavelet coefficients are used to reconstruct the image. The high-frequency components of the first layer are denoised using the total variation method. The wavelet coefficients of the layers reconstruct the image after denoising. The experimental results demonstrate that the proposed method has a higher PSNR (Peak signal to noise ratio) than other methods, and it can more effectively preserve image detail while the image is denoised.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The wavelet threshold denoising method has some defects. For example, the hard threshold function has no continuity at the threshold, which causes the Gibbs ringing effect. The soft threshold is relatively smooth, but the image is blurred. Image denoising based on total variation (TV) can effectively preserve the edge detail of the image, but in the smooth area, the denoising effect is not good. In this paper, a total variation image denoising method based on the wavelet semi-soft threshold is proposed. First, the image is decomposed using the wavelet method and the semi-soft threshold method is used to denoise in the high layer. Then, the wavelet coefficients are used to reconstruct the image. The high-frequency components of the first layer are denoised using the total variation method. The wavelet coefficients of the layers reconstruct the image after denoising. The experimental results demonstrate that the proposed method has a higher PSNR (Peak signal to noise ratio) than other methods, and it can more effectively preserve image detail while the image is denoised.",
"fno": "0612a055",
"keywords": [
"Image Denoising",
"Wavelet Transforms",
"Wavelet Semisoft Threshold",
"Wavelet Threshold Denoising Method",
"Hard Threshold Function",
"Gibbs Ringing Effect",
"Denoising Effect",
"Total Variation Image Denoising",
"Wavelet Coefficients",
"Total Variation Method",
"PSNR",
"Peak Signal To Noise Ratio",
"Noise Reduction",
"Wavelet Coefficients",
"Image Edge Detection",
"TV",
"Image Reconstruction",
"Wavelet Transform",
"Wavelet Semi Soft Threshold",
"Total Variation TV",
"Image Denoising"
],
"authors": [
{
"affiliation": null,
"fullName": "Yuqing Zhang",
"givenName": "Yuqing",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ning He",
"givenName": "Ning",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xueyan Zhen",
"givenName": "Xueyan",
"surname": "Zhen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xin Sun",
"givenName": "Xin",
"surname": "Sun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvisp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-09-01T00:00:00",
"pubType": "proceedings",
"pages": "55-62",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-0612-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0612a051",
"articleId": "12OmNro0IeG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0612a063",
"articleId": "12OmNzUxOfX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icfpee/2010/7378/0/05663329",
"title": "Research on Wavelet Image Threshold De-noising",
"doi": null,
"abstractUrl": "/proceedings-article/icfpee/2010/05663329/12OmNC8uRmP",
"parentPublication": {
"id": "proceedings/icfpee/2010/7378/0",
"title": "2010 International Conference on Future Power and Energy Engineering (ICFPEE 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmlc/2003/7865/5/01260085",
"title": "Two improved methods on wavelet image denoising",
"doi": null,
"abstractUrl": "/proceedings-article/icmlc/2003/01260085/12OmNrJRP88",
"parentPublication": {
"id": "proceedings/icmlc/2003/7865/1",
"title": "Proceedings of the 2003 International Conference on Machine Learning and Cybernetics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2015/8660/0/8660a158",
"title": "A Wavelet Image Denoising Based on the New Threshold Function",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2015/8660a158/12OmNvs4vnv",
"parentPublication": {
"id": "proceedings/cis/2015/8660/0",
"title": "2015 11th International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsee/2012/4647/3/4647c494",
"title": "Adaptive Threshold Based on Wavelet Transform Fingerprint Image Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647c494/12OmNwDj1ja",
"parentPublication": {
"id": "proceedings/iccsee/2012/4647/3",
"title": "Computer Science and Electronics Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410382",
"title": "Image denoising using wavelet and support vector regression",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410382/12OmNwE9OL5",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2015/7143/0/7143a109",
"title": "An Improved Image Denoising Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2015/7143a109/12OmNxETa49",
"parentPublication": {
"id": "proceedings/icmtma/2015/7143/0",
"title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifita/2009/3600/1/3600a111",
"title": "A New Image Denoising Method Using Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/ifita/2009/3600a111/12OmNxGj9RL",
"parentPublication": {
"id": "proceedings/ifita/2009/3600/3",
"title": "Information Technology and Applications, International Forum on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2015/7143/0/7143a448",
"title": "Enhancement and Denoising Method of Medical Ultrasound Image Based on Wavelet Analysis and Fuzzy Theory",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2015/7143a448/12OmNxwWoAA",
"parentPublication": {
"id": "proceedings/icmtma/2015/7143/0",
"title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snsp/2018/7413/0/741300a306",
"title": "Research on Signal Denoising Method Based on Adaptive Lifting Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/snsp/2018/741300a306/17D45WaTknC",
"parentPublication": {
"id": "proceedings/snsp/2018/7413/0",
"title": "2018 International Conference on Sensor Networks and Signal Processing (SNSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icctec/2017/5784/0/578400a162",
"title": "Denoising Processing of Heart Sound Signal Based on Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/icctec/2017/578400a162/1ckrXd42VX2",
"parentPublication": {
"id": "proceedings/icctec/2017/5784/0",
"title": "2017 International Conference on Computer Technology, Electronics and Communication (ICCTEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAlvHKT",
"title": "2017 Fifth International Symposium on Computing and Networking (CANDAR)",
"acronym": "candar",
"groupId": "1803431",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwnYG31",
"doi": "10.1109/CANDAR.2017.88",
"title": "Discrete Periodic Radon Transform Based Weighted Nuclear Norm Minimization for Image Denoising",
"normalizedTitle": "Discrete Periodic Radon Transform Based Weighted Nuclear Norm Minimization for Image Denoising",
"abstract": "In this paper, a novel image denoising scheme based on the weighted nuclear norm minimization (WNNM) in the discrete periodic Radom transform (DPRT) domain is proposed. While the traditional patch-based low rank minimization approach, such as WNNM, has shown highly competitive image denoising performance, they treat all image patch groups with the same strategy hence cannot be optimum since image patches can have different properties. Particularly for patches with sharp edges, they need to be carefully handled as any error in their denoising can lead to significant degradation to the visual quality of the image. For effective denoising of natural lines/edges with prominent singularities, we apply the WNNM operator in the DPRT domain which allows the edges of different orientations to be effectively represented by different DPRT projections. The proposed algorithm first identifies the image patches with strong edges in the DPRT domain. Then, the new DPRT based WNNM operator is applied for their denoising. For the smooth patches, the conventional WNNM operator is performed in the spatial domain. Simulation results unto the various testing images show that the proposed approach achieves a substantial improvement in terms of both peak signal-to-noise (PSNR) ratio and in visual quality as compared with other state-of-the-art image denoising approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, a novel image denoising scheme based on the weighted nuclear norm minimization (WNNM) in the discrete periodic Radom transform (DPRT) domain is proposed. While the traditional patch-based low rank minimization approach, such as WNNM, has shown highly competitive image denoising performance, they treat all image patch groups with the same strategy hence cannot be optimum since image patches can have different properties. Particularly for patches with sharp edges, they need to be carefully handled as any error in their denoising can lead to significant degradation to the visual quality of the image. For effective denoising of natural lines/edges with prominent singularities, we apply the WNNM operator in the DPRT domain which allows the edges of different orientations to be effectively represented by different DPRT projections. The proposed algorithm first identifies the image patches with strong edges in the DPRT domain. Then, the new DPRT based WNNM operator is applied for their denoising. For the smooth patches, the conventional WNNM operator is performed in the spatial domain. Simulation results unto the various testing images show that the proposed approach achieves a substantial improvement in terms of both peak signal-to-noise (PSNR) ratio and in visual quality as compared with other state-of-the-art image denoising approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, a novel image denoising scheme based on the weighted nuclear norm minimization (WNNM) in the discrete periodic Radom transform (DPRT) domain is proposed. While the traditional patch-based low rank minimization approach, such as WNNM, has shown highly competitive image denoising performance, they treat all image patch groups with the same strategy hence cannot be optimum since image patches can have different properties. Particularly for patches with sharp edges, they need to be carefully handled as any error in their denoising can lead to significant degradation to the visual quality of the image. For effective denoising of natural lines/edges with prominent singularities, we apply the WNNM operator in the DPRT domain which allows the edges of different orientations to be effectively represented by different DPRT projections. The proposed algorithm first identifies the image patches with strong edges in the DPRT domain. Then, the new DPRT based WNNM operator is applied for their denoising. For the smooth patches, the conventional WNNM operator is performed in the spatial domain. Simulation results unto the various testing images show that the proposed approach achieves a substantial improvement in terms of both peak signal-to-noise (PSNR) ratio and in visual quality as compared with other state-of-the-art image denoising approaches.",
"fno": "2087a395",
"keywords": [
"Image Denoising",
"Minimisation",
"Radon Transforms",
"Weighted Nuclear Norm Minimization",
"Image Denoising Scheme",
"Low Rank Minimization Approach",
"Highly Competitive Image Denoising Performance",
"Image Patches",
"Visual Quality",
"DPRT Domain",
"Conventional WNNM Operator",
"Discrete Periodic Radon Transform",
"DPRT Projections",
"Noise Reduction",
"Image Edge Detection",
"Transforms",
"Noise Measurement",
"Image Denoising",
"Radon",
"Minimization",
"Image Denoising",
"Group Based Denoising",
"Weighted Nuclear Norm Minimization WNNM",
"Discrete Periodic Radon Transform DPRT",
"BM 3 D"
],
"authors": [
{
"affiliation": null,
"fullName": "Budianto",
"givenName": null,
"surname": "Budianto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Daniel P.K. Lun",
"givenName": "Daniel P.K.",
"surname": "Lun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "candar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-11-01T00:00:00",
"pubType": "proceedings",
"pages": "395-400",
"year": "2017",
"issn": "2379-1896",
"isbn": "978-1-5386-2087-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2087a389",
"articleId": "12OmNvSKNQX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2087a401",
"articleId": "12OmNCgJeaQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2014/5118/0/5118c862",
"title": "Weighted Nuclear Norm Minimization with Application to Image Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c862/12OmNAk5HN4",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a603",
"title": "External Patch Prior Guided Internal Clustering for Image Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a603/12OmNBpEeUR",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2014/7434/0/7434a228",
"title": "Image Denoising Using Low-Rank Dictionary and Sparse Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2014/7434a228/12OmNqGRGhg",
"parentPublication": {
"id": "proceedings/cis/2014/7434/0",
"title": "2014 Tenth International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a244",
"title": "Patch Group Based Nonlocal Self-Similarity Prior Learning for Image Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a244/12OmNrkBwqy",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2013/6463/0/06528298",
"title": "Combining the power of Internal and External denoising",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2013/06528298/12OmNzWOBfJ",
"parentPublication": {
"id": "proceedings/iccp/2013/6463/0",
"title": "2013 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b105",
"title": "Multi-channel Weighted Nuclear Norm Minimization for Real Color Image Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b105/12OmNzlD9dJ",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000d165",
"title": "Multi-scale Weighted Nuclear Norm Image Restoration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000d165/17D45WXIkDR",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b886",
"title": "Good Similar Patches for Image Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b886/18j8OcUzs8U",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093586",
"title": "Identifying Recurring Patterns with Deep Neural Networks for Natural Image Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093586/1jPbDVUAEso",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412518",
"title": "Ultrasound Image Restoration Using Weighted Nuclear Norm Minimization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412518/1tmiqjLFHY4",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCcbEdk",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"acronym": "visapp",
"groupId": "1806906",
"volume": "1",
"displayVolume": "1",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxWLTke",
"doi": "",
"title": "Local regression based colorization coding",
"normalizedTitle": "Local regression based colorization coding",
"abstract": "A new image coding technique for color image based on colorization method is proposed. In colorization based image coding, the encoder selects the colorization coefficients according to the basis made from the luminance channel. Then, in the decoder, the chrominance channels are reconstructed by utilizing the luminance channel and the colorization coefficients sent from the encoder. The main issue in colorization based coding is to extract colorization coefficients well such that the compression rate and the quality of the reconstructed color becomes good enough. In this paper, we use a local regression method to extract the correlated feature between the luminance channel and the chrominance channels. The local regions are obtained by performing an image segmentation on the luminance channel both in the encoder and the decoder. Then, in the decoder, the chrominance values in each local region are reconstructed via a local regression method. The use of the correlated features helps to colorize the image with more details. The experimental results show that the proposed algorithm performs better than JPEG and JPEG2000 in terms of the compression rate and the PSNR value.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A new image coding technique for color image based on colorization method is proposed. In colorization based image coding, the encoder selects the colorization coefficients according to the basis made from the luminance channel. Then, in the decoder, the chrominance channels are reconstructed by utilizing the luminance channel and the colorization coefficients sent from the encoder. The main issue in colorization based coding is to extract colorization coefficients well such that the compression rate and the quality of the reconstructed color becomes good enough. In this paper, we use a local regression method to extract the correlated feature between the luminance channel and the chrominance channels. The local regions are obtained by performing an image segmentation on the luminance channel both in the encoder and the decoder. Then, in the decoder, the chrominance values in each local region are reconstructed via a local regression method. The use of the correlated features helps to colorize the image with more details. The experimental results show that the proposed algorithm performs better than JPEG and JPEG2000 in terms of the compression rate and the PSNR value.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A new image coding technique for color image based on colorization method is proposed. In colorization based image coding, the encoder selects the colorization coefficients according to the basis made from the luminance channel. Then, in the decoder, the chrominance channels are reconstructed by utilizing the luminance channel and the colorization coefficients sent from the encoder. The main issue in colorization based coding is to extract colorization coefficients well such that the compression rate and the quality of the reconstructed color becomes good enough. In this paper, we use a local regression method to extract the correlated feature between the luminance channel and the chrominance channels. The local regions are obtained by performing an image segmentation on the luminance channel both in the encoder and the decoder. Then, in the decoder, the chrominance values in each local region are reconstructed via a local regression method. The use of the correlated features helps to colorize the image with more details. The experimental results show that the proposed algorithm performs better than JPEG and JPEG2000 in terms of the compression rate and the PSNR value.",
"fno": "07294800",
"keywords": [
"Image Reconstruction",
"Transform Coding",
"Color",
"Image Coding",
"Decoding",
"Minimization",
"Image Color Analysis",
"Color Image Compression",
"Colorization",
"Linear Regression",
"Colorization Matrix"
],
"authors": [
{
"affiliation": "Institute of BioMed-IT, Energy-IT and Smart-IT Technology (BEST), Yonsei University, 134 Shinchon, Seodaemun-Ku, Seoul, South Korea",
"fullName": "Paul Oh",
"givenName": "Paul",
"surname": "Oh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Software Engineering, Dongseo University, 47 Jurye-ro, Sasang-Ku, Busan, South Korea",
"fullName": "Suk Ho Lee",
"givenName": "Suk Ho",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of BioMed-IT, Energy-IT and Smart-IT Technology (BEST), Yonsei University, 134 Shinchon, Seodaemun-Ku, Seoul, South Korea",
"fullName": "Moon Gi Kang",
"givenName": "Moon Gi",
"surname": "Kang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "visapp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-01-01T00:00:00",
"pubType": "proceedings",
"pages": "153-159",
"year": "2014",
"issn": null,
"isbn": "978-9-8975-8133-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07294799",
"articleId": "12OmNyL0Tml",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07294801",
"articleId": "12OmNzFMFrS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ispan/2009/3908/0/3908a492",
"title": "A Flexible and Effective Colorization System",
"doi": null,
"abstractUrl": "/proceedings-article/ispan/2009/3908a492/12OmNA0MZ6t",
"parentPublication": {
"id": "proceedings/ispan/2009/3908/0",
"title": "Parallel Architectures, Algorithms, and Networks, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/1/73100562",
"title": "Chroma coding for video at very low bit rates",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100562/12OmNB9bvi4",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460810",
"title": "Patch-based image colorization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460810/12OmNBigFy1",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2007/1834/0/04458137",
"title": "Fast Digital Image Colorization Technique",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2007/04458137/12OmNrJRPfb",
"parentPublication": {
"id": "proceedings/isspit/2007/1834/0",
"title": "2007 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2013/0602/0/06579333",
"title": "Fast JPEG Color Space Conversion on Shared Memory",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2013/06579333/12OmNxIzWNt",
"parentPublication": {
"id": "proceedings/icisa/2013/0602/0",
"title": "2013 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/3/01326638",
"title": "A very low bit-rate embedded color image coding with SPIHT",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326638/12OmNzRHOLs",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/3",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbfd/2021/1227/0/122700a045",
"title": "Image Colorization Algorithm Based on Improved GAN",
"doi": null,
"abstractUrl": "/proceedings-article/cbfd/2021/122700a045/1CJfR0SePHq",
"parentPublication": {
"id": "proceedings/cbfd/2021/1227/0",
"title": "2021 International Conference on Computer, Blockchain and Financial Development (CBFD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800j360",
"title": "Stylization-Based Architecture for Fast Deep Exemplar Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800j360/1m3oarBqEnK",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/03/09186041",
"title": "Interactive Deep Colorization and its Application for Image Compression",
"doi": null,
"abstractUrl": "/journal/tg/2022/03/09186041/1mP2JjLRhDy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2020/9234/0/923400a001",
"title": "Automatic Image Colorization via Weighted Sparse Representation Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2020/923400a001/1uGXZvzfk4w",
"parentPublication": {
"id": "proceedings/icdh/2020/9234/0",
"title": "2020 8th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gyskCPmgco",
"doi": "10.1109/ISMAR-Adjunct.2019.00-21",
"title": "Indoor Scene Reconstruction: From Panorama Images to CAD Models",
"normalizedTitle": "Indoor Scene Reconstruction: From Panorama Images to CAD Models",
"abstract": "This paper presents a novel method of reconstructing indoor scenes, both structures and objects, by a single panorama photo. The method combines room structure estimation, furniture detection, models selection, as well as 3D positions reasoning. Compare with others, our preliminary results show this method could get almost the same performance with a simpler procedure.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a novel method of reconstructing indoor scenes, both structures and objects, by a single panorama photo. The method combines room structure estimation, furniture detection, models selection, as well as 3D positions reasoning. Compare with others, our preliminary results show this method could get almost the same performance with a simpler procedure.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a novel method of reconstructing indoor scenes, both structures and objects, by a single panorama photo. The method combines room structure estimation, furniture detection, models selection, as well as 3D positions reasoning. Compare with others, our preliminary results show this method could get almost the same performance with a simpler procedure.",
"fno": "476500a317",
"keywords": [
"CAD",
"Image Reconstruction",
"Solid Modelling",
"Panorama Images",
"CAD Models",
"Room Structure Estimation",
"Furniture Detection",
"Model Selection",
"Indoor Scene Reconstruction",
"3 D Position Reasoning",
"Solid Modeling",
"Image Reconstruction",
"Three Dimensional Displays",
"Object Detection",
"Image Segmentation",
"Cognition",
"Computational Modeling",
"Indoor Scene Reconstruction",
"CAD Model",
"Panorama Images"
],
"authors": [
{
"affiliation": "China Academy of Electronics and Information Technology; Xidian University",
"fullName": "Chongyang Luo",
"givenName": "Chongyang",
"surname": "Luo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "China Academy of Electronics and Information Technology",
"fullName": "Bochao Zou",
"givenName": "Bochao",
"surname": "Zou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "China Academy of Electronics and Information Technology",
"fullName": "Xiangwen Lyu",
"givenName": "Xiangwen",
"surname": "Lyu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China; China Academy of Electronics and Information Technology",
"fullName": "Haiyong Xie",
"givenName": "Haiyong",
"surname": "Xie",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "317-320",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a311",
"articleId": "1gysnoCVWbm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a321",
"articleId": "1gysmgLCKoU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icig/2013/5050/0/5050a607",
"title": "A Space Carving Based Reconstruction Method Using Discrete Viewing Edges",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a607/12OmNBQTJhl",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/07780824",
"title": "Adaptive 3D Face Reconstruction from Unconstrained Photo Collections",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/07780824/12OmNqBtiNO",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a362",
"title": "Automatic Indoor 3D Surface Reconstruction with Segmented Building and Object Elements",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a362/12OmNqHItNn",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457c422",
"title": "IM2CAD",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457c422/12OmNzV70p4",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/11/07776921",
"title": "Adaptive 3D Face Reconstruction from Unconstrained Photo Collections",
"doi": null,
"abstractUrl": "/journal/tp/2017/11/07776921/13rRUxAAT8W",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08039524",
"title": "A Data-Driven Approach for Furniture and Indoor Scene Colorization",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08039524/13rRUy3gn7D",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000d926",
"title": "Automatic 3D Indoor Scene Modeling from Single Panorama",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000d926/17D45VtKiys",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c172",
"title": "3D Scene Reconstruction With Multi-Layer Depth and Epipolar Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c172/1hVlfLRJFS0",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a292",
"title": "3D Reconstruction and Understanding of Indoor Scene Based on Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a292/1vg846FgzWo",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1333",
"title": "Indoor Panorama Planar 3D Reconstruction via Divide and Conquer",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1333/1yeLoZfNPZC",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1nHRQncZfOM",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1nHRVtisZb2",
"doi": "10.1109/ICVRV47840.2019.00009",
"title": "Interactive Grayscale Image Colorization with Generative Adversarial Networks",
"normalizedTitle": "Interactive Grayscale Image Colorization with Generative Adversarial Networks",
"abstract": "Grayscale image colorization is a classical image editing problem. There are two different methods for colorization. The interaction-based colorization method can generate results based on user interaction. However, this method requires considerable artificial interaction to achieve the desired results. Another method is automatic colorization based on deep learning. However, in this case, the colorization result is unique and cannot be adjusted if the result is incorrect or if the user has additional requirements. In this paper, we combine deep learning with user interaction and propose a grayscale image colorization method based on generative adversarial networks. In this method, a full convolutional neural network is constructed based on the U-net structure as a generator that can process images of any size. The training data is automatically generated by randomly simulating the interactive strokes. The experimental results indicate that this approach can efficiently achieve good colorization results and is capable of generating results based on different user interactions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Grayscale image colorization is a classical image editing problem. There are two different methods for colorization. The interaction-based colorization method can generate results based on user interaction. However, this method requires considerable artificial interaction to achieve the desired results. Another method is automatic colorization based on deep learning. However, in this case, the colorization result is unique and cannot be adjusted if the result is incorrect or if the user has additional requirements. In this paper, we combine deep learning with user interaction and propose a grayscale image colorization method based on generative adversarial networks. In this method, a full convolutional neural network is constructed based on the U-net structure as a generator that can process images of any size. The training data is automatically generated by randomly simulating the interactive strokes. The experimental results indicate that this approach can efficiently achieve good colorization results and is capable of generating results based on different user interactions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Grayscale image colorization is a classical image editing problem. There are two different methods for colorization. The interaction-based colorization method can generate results based on user interaction. However, this method requires considerable artificial interaction to achieve the desired results. Another method is automatic colorization based on deep learning. However, in this case, the colorization result is unique and cannot be adjusted if the result is incorrect or if the user has additional requirements. In this paper, we combine deep learning with user interaction and propose a grayscale image colorization method based on generative adversarial networks. In this method, a full convolutional neural network is constructed based on the U-net structure as a generator that can process images of any size. The training data is automatically generated by randomly simulating the interactive strokes. The experimental results indicate that this approach can efficiently achieve good colorization results and is capable of generating results based on different user interactions.",
"fno": "09212835",
"keywords": [
"Convolutional Neural Nets",
"Image Colour Analysis",
"Learning Artificial Intelligence",
"Training Data",
"Interactive Strokes",
"Convolutional Neural Network",
"Grayscale Image Colorization Method",
"Deep Learning",
"Automatic Colorization",
"Artificial Interaction",
"User Interaction",
"Interaction Based Colorization Method",
"Classical Image Editing Problem",
"Generative Adversarial Networks",
"Interactive Grayscale Image Colorization",
"Gray Scale",
"Image Color Analysis",
"Training",
"Color",
"Generators",
"Neural Networks",
"Generative Adversarial Networks",
"Computing Methodologies",
"Artificial Intelligence",
"Computer Vision",
"Image Representations"
],
"authors": [
{
"affiliation": "Beihang University, Beijing, China",
"fullName": "Kai Wang",
"givenName": "Kai",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University, Beijing, China",
"fullName": "Jianwei Li",
"givenName": "Jianwei",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University, Beijing, China; Peng Cheng Laboratory, Shenzhen, China",
"fullName": "Bin Zhou",
"givenName": "Bin",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2019",
"issn": "2375-141X",
"isbn": "978-1-7281-4752-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09212959",
"articleId": "1nHRUgb0GWs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09213042",
"articleId": "1nHRTBSxq2A",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icig/2013/5050/0/5050a089",
"title": "An Interactive Framework for Video Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a089/12OmNBLdKIJ",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a415",
"title": "Deep Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a415/12OmNBNM93v",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460810",
"title": "Patch-based image colorization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460810/12OmNBigFy1",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2012/4829/0/4829a032",
"title": "Colorization by Multidimensional Projection",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2012/4829a032/12OmNBsLPdX",
"parentPublication": {
"id": "proceedings/sibgrapi/2012/4829/0",
"title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600b787",
"title": "iColoriT: Towards Propagating Local Hints to the Right Region in Interactive Colorization by Leveraging Vision Transformer",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600b787/1KxUuahpqJG",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093389",
"title": "ChromaGAN: Adversarial Picture Colorization with Semantic Class Distribution",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093389/1jPbfLAnmEg",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h965",
"title": "Instance-Aware Image Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h965/1m3nNZyhYXe",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/03/09186041",
"title": "Interactive Deep Colorization and its Application for Image Compression",
"doi": null,
"abstractUrl": "/journal/tg/2022/03/09186041/1mP2JjLRhDy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdia/2020/2232/0/223200a214",
"title": "DDGAN: Double Discriminators GAN for Accurate Image Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/bigdia/2020/223200a214/1stvtNQUp20",
"parentPublication": {
"id": "proceedings/bigdia/2020/2232/0",
"title": "2020 6th International Conference on Big Data and Information Analytics (BigDIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2020/9234/0/923400a001",
"title": "Automatic Image Colorization via Weighted Sparse Representation Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2020/923400a001/1uGXZvzfk4w",
"parentPublication": {
"id": "proceedings/icdh/2020/9234/0",
"title": "2020 8th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBOll8c",
"title": "2015 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"acronym": "asonam",
"groupId": "1002866",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqI04VQ",
"doi": "10.1145/2808797.2808879",
"title": "Identifying disruptive events from social media to enhance situational awareness",
"normalizedTitle": "Identifying disruptive events from social media to enhance situational awareness",
"abstract": "Decision makers use information from a range of terrestrial and online sources to help underpin the processes through which they develop policies and react to events as they unfold. One such source of online information is social media. Twitter, as a form of social media, is a popular micro-blogging Web application serving hundreds of millions of users. User-generated content can be exploited as a rich source of information for identifying ‘real-world’ disruptive events. In this paper, we present an in-depth comparison of three types of features that could be useful for identifying disruptive events: temporal, spatial and textual. We make several interesting observations: first, disruptive events are identifiable regardless of the \"influence of the user\" discussing them, and over a variety of topics. Second, temporal features are the best event identifiers and hence should not be disregarded or ignored. Third, a combination of optimum textual features with temporal and spatial features achieves best performance in the event detection task. We believe that these findings provide new insights for gathering information around real-world events as well as a useful resource for improving situational awareness and decision support.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Decision makers use information from a range of terrestrial and online sources to help underpin the processes through which they develop policies and react to events as they unfold. One such source of online information is social media. Twitter, as a form of social media, is a popular micro-blogging Web application serving hundreds of millions of users. User-generated content can be exploited as a rich source of information for identifying ‘real-world’ disruptive events. In this paper, we present an in-depth comparison of three types of features that could be useful for identifying disruptive events: temporal, spatial and textual. We make several interesting observations: first, disruptive events are identifiable regardless of the \"influence of the user\" discussing them, and over a variety of topics. Second, temporal features are the best event identifiers and hence should not be disregarded or ignored. Third, a combination of optimum textual features with temporal and spatial features achieves best performance in the event detection task. We believe that these findings provide new insights for gathering information around real-world events as well as a useful resource for improving situational awareness and decision support.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Decision makers use information from a range of terrestrial and online sources to help underpin the processes through which they develop policies and react to events as they unfold. One such source of online information is social media. Twitter, as a form of social media, is a popular micro-blogging Web application serving hundreds of millions of users. User-generated content can be exploited as a rich source of information for identifying ‘real-world’ disruptive events. In this paper, we present an in-depth comparison of three types of features that could be useful for identifying disruptive events: temporal, spatial and textual. We make several interesting observations: first, disruptive events are identifiable regardless of the \"influence of the user\" discussing them, and over a variety of topics. Second, temporal features are the best event identifiers and hence should not be disregarded or ignored. Third, a combination of optimum textual features with temporal and spatial features achieves best performance in the event detection task. We believe that these findings provide new insights for gathering information around real-world events as well as a useful resource for improving situational awareness and decision support.",
"fno": "07403658",
"keywords": [
"Twitter",
"Feature Extraction",
"Media",
"Event Detection",
"Real Time Systems",
"Data Mining",
"Safety",
"Feature Selection",
"Data Mining",
"Event Detection"
],
"authors": [
{
"affiliation": "Cardiff School of Computer Science & Informatics, Cardiff University",
"fullName": "Nasser Alsaedi",
"givenName": "Nasser",
"surname": "Alsaedi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cardiff School of Computer Science & Informatics, Cardiff University",
"fullName": "Pete Burnap",
"givenName": "Pete",
"surname": "Burnap",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cardiff School of Computer Science & Informatics, Cardiff University",
"fullName": "Omer Rana",
"givenName": "Omer",
"surname": "Rana",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "asonam",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-08-01T00:00:00",
"pubType": "proceedings",
"pages": "934-941",
"year": "2015",
"issn": null,
"isbn": "978-1-4503-3854-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07403657",
"articleId": "12OmNvFpEyD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07403659",
"articleId": "12OmNAYXWB1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/trustcom/2014/6513/0/6513a605",
"title": "Crowd Sensing of Urban Emergency Events Based on Social Media Big Data",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2014/6513a605/12OmNCwCLto",
"parentPublication": {
"id": "proceedings/trustcom/2014/6513/0",
"title": "2014 IEEE 13th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2015/3854/0/07403582",
"title": "Feature extraction and analysis for identifying disruptive events from social media",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2015/07403582/12OmNvkGW5C",
"parentPublication": {
"id": "proceedings/asonam/2015/3854/0",
"title": "2015 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wsc/2003/8131/2/01261602",
"title": "Reasoning about actions and events in situational simulations",
"doi": null,
"abstractUrl": "/proceedings-article/wsc/2003/01261602/12OmNyGtjd0",
"parentPublication": {
"id": "proceedings/wsc/2003/8131/2",
"title": "Proceedings of the 2003 Winter Simulation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945732",
"title": "Using big data values to enhance social event detection pattern",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945732/12OmNyUFfW7",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2016/4470/0/4470a216",
"title": "Sensing Real-World Events Using Social Media Data and a Classification-Clustering Framework",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2016/4470a216/12OmNz5JC35",
"parentPublication": {
"id": "proceedings/wi/2016/4470/0",
"title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2015/3854/0/07403588",
"title": "Finding non-redundant multi-word events on Twitter",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2015/07403588/12OmNzUxO7C",
"parentPublication": {
"id": "proceedings/asonam/2015/3854/0",
"title": "2015 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccps/2018/5301/0/530101a065",
"title": "Dynamic Integration of Heterogeneous Transportation Modes under Disruptive Events",
"doi": null,
"abstractUrl": "/proceedings-article/iccps/2018/530101a065/13bd1sx4Zsk",
"parentPublication": {
"id": "proceedings/iccps/2018/5301/0",
"title": "2018 ACM/IEEE 9th International Conference on Cyber-Physical Systems (ICCPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2018/02/07547310",
"title": "From Latency, Through Outbreak, to Decline: Detecting Different States of Emergency Events Using Web Resources",
"doi": null,
"abstractUrl": "/journal/bd/2018/02/07547310/13rRUwInv6q",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2017/4993/0/09069073",
"title": "Exploring Social Media for Event Attendance",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2017/09069073/1j9xVIttoyY",
"parentPublication": {
"id": "proceedings/asonam/2017/4993/0",
"title": "2017 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/03/09094319",
"title": "Predicting Hot Events in the Early Period through Bayesian Model for Social Networks",
"doi": null,
"abstractUrl": "/journal/tk/2022/03/09094319/1jQNpjEcx44",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBOll8c",
"title": "2015 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"acronym": "asonam",
"groupId": "1002866",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvkGW5C",
"doi": "10.1145/2808797.2808867",
"title": "Feature extraction and analysis for identifying disruptive events from social media",
"normalizedTitle": "Feature extraction and analysis for identifying disruptive events from social media",
"abstract": "Disruptive event identification is a concept that is crucial to ensuring public safety regarding large-scale events. Recent work on detecting events from social media shows that although these platforms are used for social purposes, they have been emerging as important source of information. Twitter, as a form of social media, is a popular micro-blogging web application serving hundreds of millions of users. User-generated content can be exploited as a rich source of information for identifying ‘real-world’ disruptive events — events that threaten social safety and security, or could cause disruption to social order. In this paper, we present an in-depth comparison of two types of feature that could be useful for identifying disruptive events: temporal and textual features. On the basis of these features, we investigate the dynamics of event/topic identification over time. We make several interesting observations: first, disruptive events are identifiable regardless of the \"influence of the user\" discussing them, and over a variety of topics. Second, temporal features play a central role in event detection and hence should not be disregarded or ignored. Third, textual features can be used to improve the overall performance of the event detection. We believe that these findings provide new insights for gathering information around real-world events, in particular for detecting disruptive events.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Disruptive event identification is a concept that is crucial to ensuring public safety regarding large-scale events. Recent work on detecting events from social media shows that although these platforms are used for social purposes, they have been emerging as important source of information. Twitter, as a form of social media, is a popular micro-blogging web application serving hundreds of millions of users. User-generated content can be exploited as a rich source of information for identifying ‘real-world’ disruptive events — events that threaten social safety and security, or could cause disruption to social order. In this paper, we present an in-depth comparison of two types of feature that could be useful for identifying disruptive events: temporal and textual features. On the basis of these features, we investigate the dynamics of event/topic identification over time. We make several interesting observations: first, disruptive events are identifiable regardless of the \"influence of the user\" discussing them, and over a variety of topics. Second, temporal features play a central role in event detection and hence should not be disregarded or ignored. Third, textual features can be used to improve the overall performance of the event detection. We believe that these findings provide new insights for gathering information around real-world events, in particular for detecting disruptive events.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Disruptive event identification is a concept that is crucial to ensuring public safety regarding large-scale events. Recent work on detecting events from social media shows that although these platforms are used for social purposes, they have been emerging as important source of information. Twitter, as a form of social media, is a popular micro-blogging web application serving hundreds of millions of users. User-generated content can be exploited as a rich source of information for identifying ‘real-world’ disruptive events — events that threaten social safety and security, or could cause disruption to social order. In this paper, we present an in-depth comparison of two types of feature that could be useful for identifying disruptive events: temporal and textual features. On the basis of these features, we investigate the dynamics of event/topic identification over time. We make several interesting observations: first, disruptive events are identifiable regardless of the \"influence of the user\" discussing them, and over a variety of topics. Second, temporal features play a central role in event detection and hence should not be disregarded or ignored. Third, textual features can be used to improve the overall performance of the event detection. We believe that these findings provide new insights for gathering information around real-world events, in particular for detecting disruptive events.",
"fno": "07403582",
"keywords": [
"Feature Extraction",
"Twitter",
"Media",
"Event Detection",
"Data Mining",
"Safety",
"Feature Selection",
"Data Mining",
"Event Detection"
],
"authors": [
{
"affiliation": "Cardiff School of Computer Science & Informatics, Cardiff University, Cardiff, UK",
"fullName": "Nasser Alsaedi",
"givenName": "Nasser",
"surname": "Alsaedi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cardiff School of Computer Science & Informatics, Cardiff University, Cardiff, UK",
"fullName": "Pete Burnap",
"givenName": "Pete",
"surname": "Burnap",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "asonam",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-08-01T00:00:00",
"pubType": "proceedings",
"pages": "1495-1502",
"year": "2015",
"issn": null,
"isbn": "978-1-4503-3854-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07403581",
"articleId": "12OmNCd2rPB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07403583",
"articleId": "12OmNxWcHhe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/trustcom/2014/6513/0/6513a605",
"title": "Crowd Sensing of Urban Emergency Events Based on Social Media Big Data",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2014/6513a605/12OmNCwCLto",
"parentPublication": {
"id": "proceedings/trustcom/2014/6513/0",
"title": "2014 IEEE 13th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2015/3854/0/07403658",
"title": "Identifying disruptive events from social media to enhance situational awareness",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2015/07403658/12OmNqI04VQ",
"parentPublication": {
"id": "proceedings/asonam/2015/3854/0",
"title": "2015 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2015/3854/0/07403583",
"title": "Exploring a scalable solution to identifying events in noisy Twitter streams",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2015/07403583/12OmNxWcHhe",
"parentPublication": {
"id": "proceedings/asonam/2015/3854/0",
"title": "2015 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2015/8493/0/8493a001",
"title": "Automatic visual analysis of real-world events covered by social media using convolutional neural networks",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2015/8493a001/12OmNyqRn8d",
"parentPublication": {
"id": "proceedings/icdmw/2015/8493/0",
"title": "2015 IEEE International Conference on Data Mining Workshop (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2016/4470/0/4470a216",
"title": "Sensing Real-World Events Using Social Media Data and a Classification-Clustering Framework",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2016/4470a216/12OmNz5JC35",
"parentPublication": {
"id": "proceedings/wi/2016/4470/0",
"title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2018/09/08263144",
"title": "Identifying On-Site Users for Social Events: Mobility, Content, and Social Relationship",
"doi": null,
"abstractUrl": "/journal/tm/2018/09/08263144/13rRUwIF6lU",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2015/03/mex2015030044",
"title": "Identifying adverse drug events from patient social media: A case study for diabetes",
"doi": null,
"abstractUrl": "/magazine/ex/2015/03/mex2015030044/13rRUx0xQ48",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2015/11/07124472",
"title": "Indexing Evolving Events from Tweet Streams",
"doi": null,
"abstractUrl": "/journal/tk/2015/11/07124472/13rRUxNW1ZK",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08257978",
"title": "Online city-scale hyper-local event detection via analysis of social media and human mobility",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08257978/17D45VTRoDw",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2021/02/08778718",
"title": "User-Driven Geolocated Event Detection in Social Media",
"doi": null,
"abstractUrl": "/journal/tk/2021/02/08778718/1c8N0Ca3G4o",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvAiSpZ",
"title": "2015 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB8TU5g",
"doi": "10.1109/VR.2015.7223342",
"title": "Wayfinding by auditory cues in virtual environments",
"normalizedTitle": "Wayfinding by auditory cues in virtual environments",
"abstract": "This paper describes the use of 3D sound as a wayfinding tool in virtual environments. We present the design of a study and a pilot evaluation of the use of spatial sound as a wayfinding cue on navigation in virtual environments. A pilot study with four participants offered insights suggesting that auditory cues may have the potential to help users perform wayfinding tasks in a virtual mall environment faster and more accurately than in an environment without auditory cues.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper describes the use of 3D sound as a wayfinding tool in virtual environments. We present the design of a study and a pilot evaluation of the use of spatial sound as a wayfinding cue on navigation in virtual environments. A pilot study with four participants offered insights suggesting that auditory cues may have the potential to help users perform wayfinding tasks in a virtual mall environment faster and more accurately than in an environment without auditory cues.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper describes the use of 3D sound as a wayfinding tool in virtual environments. We present the design of a study and a pilot evaluation of the use of spatial sound as a wayfinding cue on navigation in virtual environments. A pilot study with four participants offered insights suggesting that auditory cues may have the potential to help users perform wayfinding tasks in a virtual mall environment faster and more accurately than in an environment without auditory cues.",
"fno": "07223342",
"keywords": [
"Virtual Environments",
"Three Dimensional Displays",
"Buildings",
"Visualization",
"Global Positioning System",
"Cognitive Map",
"Wayfinding",
"3 D Spatial Sound"
],
"authors": [
{
"affiliation": "Department of Computer Science, Duke University",
"fullName": "Ayana Burkins",
"givenName": "Ayana",
"surname": "Burkins",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Duke immersive Virtual Environment, Duke University",
"fullName": "Regis Kopper",
"givenName": "Regis",
"surname": "Kopper",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "155-156",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-1727-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07223341",
"articleId": "12OmNxuo0jm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07223343",
"articleId": "12OmNC8dgaB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2009/3965/0/04811207",
"title": "Wayfinding techniques for multiScale virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2009/04811207/12OmNBigFv0",
"parentPublication": {
"id": "proceedings/3dui/2009/3965/0",
"title": "2009 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476608",
"title": "Poster: Evaluation of Wayfinding Aid Techniques in Multi-Level Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476608/12OmNsdo6rt",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrais/1995/7084/0/70840074",
"title": "Presence in virtual environments as a function of visual and auditory cues",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1995/70840074/12OmNzlUKP6",
"parentPublication": {
"id": "proceedings/vrais/1995/7084/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08063899",
"title": "Automatic Optimization of Wayfinding Design",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08063899/13rRUwI5U7Z",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2005/02/u2080",
"title": "Navigation with Auditory Cues in a Virtual Environment",
"doi": null,
"abstractUrl": "/magazine/mu/2005/02/u2080/13rRUwInvi2",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a719",
"title": "Spatial Updating in Virtual Reality – Auditory and Visual Cues in a Cave Automatic Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a719/1CJch0MXduw",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a952",
"title": "[DC] Effects of Asymmetric Locomotion Methods on Collaborative Navigation and Wayfinding in Shared Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a952/1CJfs97XQhq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798286",
"title": "Evaluating the Effectiveness of Redirected Walking with Auditory Distractors for Navigation in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798286/1cJ0PIoIPV6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a639",
"title": "Visual-Auditory Redirection: Multimodal Integration of Incongruent Visual and Auditory Cues for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a639/1pysvxeFG4E",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2021/0424/0/09431138",
"title": "CityGuide: A Seamless Indoor-Outdoor Wayfinding System for People With Vision Impairments",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2021/09431138/1tROZUVGeu4",
"parentPublication": {
"id": "proceedings/percom-workshops/2021/0424/0",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwMXnuX",
"title": "2009 IEEE Symposium on 3D User Interfaces",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBigFv0",
"doi": "10.1109/3DUI.2009.4811207",
"title": "Wayfinding techniques for multiScale virtual environments",
"normalizedTitle": "Wayfinding techniques for multiScale virtual environments",
"abstract": "Wayfinding in multiscale virtual environments can be rather complex, as users can and sometimes have to change their scale to access the entire environment. Hence, this work focuses on the understanding and classification of information needed for travel, as well as on the design of navigation techniques that provide this information. To this end, we first identified two kinds of information necessary for traveling effectively in this kind of environment: hierarchical information, based on the hierarchical structure formed by the levels of scale; and spatial information, related to orientation, distance between objects in different levels of scale and spatial localization. Based on this, we designed and implemented one technique for each kind of information. The developed techniques were evaluated and compared to a baseline set of travel and wayfinding aid techniques for traveling through multiple scales. Results show that the developed techniques perform better and provide a better solution for both travel and wayfinding aid.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Wayfinding in multiscale virtual environments can be rather complex, as users can and sometimes have to change their scale to access the entire environment. Hence, this work focuses on the understanding and classification of information needed for travel, as well as on the design of navigation techniques that provide this information. To this end, we first identified two kinds of information necessary for traveling effectively in this kind of environment: hierarchical information, based on the hierarchical structure formed by the levels of scale; and spatial information, related to orientation, distance between objects in different levels of scale and spatial localization. Based on this, we designed and implemented one technique for each kind of information. The developed techniques were evaluated and compared to a baseline set of travel and wayfinding aid techniques for traveling through multiple scales. Results show that the developed techniques perform better and provide a better solution for both travel and wayfinding aid.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Wayfinding in multiscale virtual environments can be rather complex, as users can and sometimes have to change their scale to access the entire environment. Hence, this work focuses on the understanding and classification of information needed for travel, as well as on the design of navigation techniques that provide this information. To this end, we first identified two kinds of information necessary for traveling effectively in this kind of environment: hierarchical information, based on the hierarchical structure formed by the levels of scale; and spatial information, related to orientation, distance between objects in different levels of scale and spatial localization. Based on this, we designed and implemented one technique for each kind of information. The developed techniques were evaluated and compared to a baseline set of travel and wayfinding aid techniques for traveling through multiple scales. Results show that the developed techniques perform better and provide a better solution for both travel and wayfinding aid.",
"fno": "04811207",
"keywords": [
"Virtual Environment",
"Navigation",
"Computer Graphics",
"Humans",
"Buildings",
"Cities And Towns",
"Process Planning",
"Elementary Particles",
"Floors",
"Eyes",
"I 3 6 Computer Graphics Methodology And Techniques Interaction Techniques",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality"
],
"authors": [
{
"affiliation": "PUCRS, Brazil",
"fullName": "Felipe Bacim",
"givenName": "Felipe",
"surname": "Bacim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech, USA",
"fullName": "Doug Bowman",
"givenName": "Doug",
"surname": "Bowman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "PUCRS, Brazil",
"fullName": "Marcio Pinho",
"givenName": "Marcio",
"surname": "Pinho",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-03-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2009",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04811191",
"articleId": "12OmNweTvOg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04811198",
"articleId": "12OmNzBOhHq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223342",
"title": "Wayfinding by auditory cues in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223342/12OmNB8TU5g",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2006/0225/0/02250003",
"title": "Evaluating Distributed Cognitive Resources for Wayfinding in a Desktop Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2006/02250003/12OmNBWi6MK",
"parentPublication": {
"id": "proceedings/3dui/2006/0225/0",
"title": "3D User Interfaces (3DUI'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2005/2660/0/237230030",
"title": "Digital Sign System for Indoor Wayfinding for the Visually Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2005/237230030/12OmNBoNrkV",
"parentPublication": {
"id": "proceedings/cvprw/2005/2660/0",
"title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05) - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460033",
"title": "Automatic speed adjustment for travel through immersive virtual environments based on viewpoint quality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460033/12OmNqI04GH",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476608",
"title": "Poster: Evaluation of Wayfinding Aid Techniques in Multi-Level Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476608/12OmNsdo6rt",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2006/0224/0/02240175",
"title": "Design and Evaluation of Navigation Techniques for Multiscale Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/02240175/12OmNyv7mkJ",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a952",
"title": "[DC] Effects of Asymmetric Locomotion Methods on Collaborative Navigation and Wayfinding in Shared Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a952/1CJfs97XQhq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049698",
"title": "Gaining the High Ground: Teleportation to Mid-Air Targets in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049698/1KYotugT0xW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10113740",
"title": "SceneFusion: Room-Scale Environmental Fusion for Efficient Traveling Between Separate Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10113740/1MNbKAm1pUQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a680",
"title": "Designing Viewpoint Transition Techniques in Multiscale Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a680/1MNgp7L6LcY",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAWpykB",
"title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05) - Workshops",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBoNrkV",
"doi": "10.1109/CVPR.2005.442",
"title": "Digital Sign System for Indoor Wayfinding for the Visually Impaired",
"normalizedTitle": "Digital Sign System for Indoor Wayfinding for the Visually Impaired",
"abstract": "Mobility challenges and independent travel are major concerns for blind and visually impaired pedestrians [1][2]. Navigation and wayfinding in unfamiliar indoor environments are particularly challenging because blind pedestrians do not have ready access to building maps, signs and other orienting devices. The development of assistive technologies to aid wayfinding is hampered by the lack of a reliable and cost-efficient method for providing location information in an indoor environment. Here we describe the design and implementation of a digital sign system based on low-cost passive retro-reflective tags printed with specially designed patterns that can be readily detected and identified by a handheld camera and machine-vision system. Performance of the prototype showed the tag detection/recognition system could cope with the real-world environment of a typical building.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mobility challenges and independent travel are major concerns for blind and visually impaired pedestrians [1][2]. Navigation and wayfinding in unfamiliar indoor environments are particularly challenging because blind pedestrians do not have ready access to building maps, signs and other orienting devices. The development of assistive technologies to aid wayfinding is hampered by the lack of a reliable and cost-efficient method for providing location information in an indoor environment. Here we describe the design and implementation of a digital sign system based on low-cost passive retro-reflective tags printed with specially designed patterns that can be readily detected and identified by a handheld camera and machine-vision system. Performance of the prototype showed the tag detection/recognition system could cope with the real-world environment of a typical building.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mobility challenges and independent travel are major concerns for blind and visually impaired pedestrians [1][2]. Navigation and wayfinding in unfamiliar indoor environments are particularly challenging because blind pedestrians do not have ready access to building maps, signs and other orienting devices. The development of assistive technologies to aid wayfinding is hampered by the lack of a reliable and cost-efficient method for providing location information in an indoor environment. Here we describe the design and implementation of a digital sign system based on low-cost passive retro-reflective tags printed with specially designed patterns that can be readily detected and identified by a handheld camera and machine-vision system. Performance of the prototype showed the tag detection/recognition system could cope with the real-world environment of a typical building.",
"fno": "237230030",
"keywords": [],
"authors": [
{
"affiliation": "University of Southern California, Los Angeles",
"fullName": "B.S. Tjan",
"givenName": "B.S.",
"surname": "Tjan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beckmann Research, LLC",
"fullName": "P.J. Beckmann",
"givenName": "P.J.",
"surname": "Beckmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Minnesota",
"fullName": "R. Roy",
"givenName": "R.",
"surname": "Roy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California, Santa Barbara",
"fullName": "N. Giudice",
"givenName": "N.",
"surname": "Giudice",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Minnesota",
"fullName": "G.E. Legge",
"givenName": "G.E.",
"surname": "Legge",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-06-01T00:00:00",
"pubType": "proceedings",
"pages": "30",
"year": "2005",
"issn": "1063-6919",
"isbn": "0-7695-2660-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01565341",
"articleId": "12OmNBKmXoo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "237230031",
"articleId": "12OmNxwENo2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/2011/1189/0/05999159",
"title": "Self-adaptive application for indoor wayfinding for individuals with cognitive impairments",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2011/05999159/12OmNx7ouXD",
"parentPublication": {
"id": "proceedings/cbms/2011/1189/0",
"title": "2011 24th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2011/1612/0/06112422",
"title": "Indoor signage detection based on saliency map and bipartite graph matching",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2011/06112422/12OmNxuFBo0",
"parentPublication": {
"id": "proceedings/bibmw/2011/1612/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2011/0774/0/05959608",
"title": "Indoor-Outdoor Navigation System for Visually-Impaired Pedestrians: Preliminary Evaluation of Position Measurement and Obstacle Display",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2011/05959608/12OmNyUnEET",
"parentPublication": {
"id": "proceedings/iswc/2011/0774/0",
"title": "2011 15th Annual International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08063899",
"title": "Automatic Optimization of Wayfinding Design",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08063899/13rRUwI5U7Z",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icimp/2007/2911/0/04271783",
"title": "Path Planning and Following Algorithms in an Indoor Navigation Model for Visually Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/icimp/2007/04271783/17D45XoXP7D",
"parentPublication": {
"id": "proceedings/icimp/2007/2911/0",
"title": "Internet Monitoring and Protection, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a210",
"title": "A Comparative Analysis of Visual-Inertial SLAM for Assisted Wayfinding of the Visually Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a210/18j8P4rWFdm",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2019/5584/0/558400a936",
"title": "Indoor Electronic Traveling Aids for Visually Impaired: Systemic Review",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2019/558400a936/1jdDS4l7A3u",
"parentPublication": {
"id": "proceedings/csci/2019/5584/0",
"title": "2019 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartiot/2020/6514/0/09192012",
"title": "Shortest Path Based Trained Indoor Smart Jacket Navigation System for Visually Impaired Person",
"doi": null,
"abstractUrl": "/proceedings-article/smartiot/2020/09192012/1n0IuhZD6Rq",
"parentPublication": {
"id": "proceedings/smartiot/2020/6514/0",
"title": "2020 IEEE International Conference on Smart Internet of Things (SmartIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2021/0424/0/09431007",
"title": "PathLookup: A Deep Learning-Based Framework to Assist Visually Impaired in Outdoor Wayfinding",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2021/09431007/1tROJgKMUqQ",
"parentPublication": {
"id": "proceedings/percom-workshops/2021/0424/0",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2021/0424/0/09431138",
"title": "CityGuide: A Seamless Indoor-Outdoor Wayfinding System for People With Vision Impairments",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2021/09431138/1tROZUVGeu4",
"parentPublication": {
"id": "proceedings/percom-workshops/2021/0424/0",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNs4S8vE",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNsdo6rt",
"doi": "10.1109/3DUI.2008.4476608",
"title": "Poster: Evaluation of Wayfinding Aid Techniques in Multi-Level Virtual Environments",
"normalizedTitle": "Poster: Evaluation of Wayfinding Aid Techniques in Multi-Level Virtual Environments",
"abstract": "This work deals with the evaluation of wayfinding aid techniques in multi-level virtual environments. The evaluation was accomplished through the development of two applications that implement four of the most commonly used wayfinding aid techniques (maps, compasses, landmarks and signs) and tested with two groups of users in two different experiments. The test results showed that compass is the technique which demanded less cognitive effort and that experience with games affects positively the users' performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This work deals with the evaluation of wayfinding aid techniques in multi-level virtual environments. The evaluation was accomplished through the development of two applications that implement four of the most commonly used wayfinding aid techniques (maps, compasses, landmarks and signs) and tested with two groups of users in two different experiments. The test results showed that compass is the technique which demanded less cognitive effort and that experience with games affects positively the users' performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This work deals with the evaluation of wayfinding aid techniques in multi-level virtual environments. The evaluation was accomplished through the development of two applications that implement four of the most commonly used wayfinding aid techniques (maps, compasses, landmarks and signs) and tested with two groups of users in two different experiments. The test results showed that compass is the technique which demanded less cognitive effort and that experience with games affects positively the users' performance.",
"fno": "04476608",
"keywords": [
"User Interfaces",
"Virtual Reality",
"Wayfinding Aid Techniques",
"Multilevel Virtual Environments",
"Map Technique",
"Landmark Technique",
"Sign Technique",
"Compass Technique",
"Virtual Environment",
"Testing",
"Computer Science",
"Navigation",
"Virtual Reality",
"Floors",
"Multimedia Systems",
"Computer Graphics",
"Topology",
"Electronic Mail",
"Wayfinding Aids Techniques",
"Navigation In Virtual Environments",
"Virtual Reality",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial",
"Augmented",
"And Virtual Realities",
"I 3 6 Computer Graphics Methodology And Techniques Interaction Techniques"
],
"authors": [
{
"affiliation": "Faculty of Computer Science, PUCRS-Brazil",
"fullName": "Felipe Bacim",
"givenName": "Felipe",
"surname": "Bacim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Computer Science, PUCRS-Brazil",
"fullName": "Andre Trombetta",
"givenName": "Andre",
"surname": "Trombetta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Computer Science, PUCRS-Brazil",
"fullName": "Rafael Rieder",
"givenName": "Rafael",
"surname": "Rieder",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Computer Science, PUCRS-Brazil, e-mail: pinho@pucrs.br",
"fullName": "Marcio Pinho",
"givenName": "Marcio",
"surname": "Pinho",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-03-01T00:00:00",
"pubType": "proceedings",
"pages": "143-144",
"year": "2008",
"issn": null,
"isbn": "978-1-4244-2047-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04476607",
"articleId": "12OmNqFJhIO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04476593",
"articleId": "12OmNvDI3OQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223342",
"title": "Wayfinding by auditory cues in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223342/12OmNB8TU5g",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2006/0225/0/02250003",
"title": "Evaluating Distributed Cognitive Resources for Wayfinding in a Desktop Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2006/02250003/12OmNBWi6MK",
"parentPublication": {
"id": "proceedings/3dui/2006/0225/0",
"title": "3D User Interfaces (3DUI'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2009/3965/0/04811207",
"title": "Wayfinding techniques for multiScale virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2009/04811207/12OmNBigFv0",
"parentPublication": {
"id": "proceedings/3dui/2009/3965/0",
"title": "2009 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/coginf/2010/8042/0/05599724",
"title": "Using space syntax to understand knowledge acquisition and wayfinding in indoor environments",
"doi": null,
"abstractUrl": "/proceedings-article/coginf/2010/05599724/12OmNwMFMmw",
"parentPublication": {
"id": "proceedings/coginf/2010/8042/0",
"title": "2010 9th IEEE International Conference on Cognitive Informatics (ICCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2009/3965/0/04811223",
"title": "Poster: Vibration as a wayfinding aid",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2009/04811223/12OmNzw8j4W",
"parentPublication": {
"id": "proceedings/3dui/2009/3965/0",
"title": "2009 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08447553",
"title": "Interactive Exploration Assistance for Immersive Virtual Environments Based on Object Visibility and Viewpoint Quality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08447553/13bd1f3HvF3",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08063899",
"title": "Automatic Optimization of Wayfinding Design",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08063899/13rRUwI5U7Z",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a952",
"title": "[DC] Effects of Asymmetric Locomotion Methods on Collaborative Navigation and Wayfinding in Shared Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a952/1CJfs97XQhq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090399",
"title": "Map Displays And Landmark Effects On Wayfinding In Unfamiliar Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090399/1jIxkn3znZC",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2021/0424/0/09431138",
"title": "CityGuide: A Seamless Indoor-Outdoor Wayfinding System for People With Vision Impairments",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2021/09431138/1tROZUVGeu4",
"parentPublication": {
"id": "proceedings/percom-workshops/2021/0424/0",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx7ouMq",
"title": "2018 IEEE 32nd International Conference on Advanced Information Networking and Applications (AINA)",
"acronym": "aina",
"groupId": "1000008",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxWcH87",
"doi": "10.1109/AINA.2018.00078",
"title": "Wayfinding Behavior Detection by Smartphone",
"normalizedTitle": "Wayfinding Behavior Detection by Smartphone",
"abstract": "While heading to a destination, we usually rely on our cognitive map constructed by audiovisual information from maps and our sight. However, errors or gaps between the real and our cognitive map often confuse us and lead to \"wayfinding\". In such a wayfinding state, we tend to take actions like wandering for perceiving errors and gathering information about surrounding environment. If such behavior can be detected by smartphones, we may design new applications on the smartphones, for instance, virtual \"concierge\" that timely helps us when we lose our ways. Also grasping spots where people are likely to lose their ways in large museums and theme parks would be useful to install or improve the signs and directions to support visitors. In this paper, we propose a method to detect individuals' wayfinding behavior from walking features by smartphone sensors. Based on the preliminary experiment, we extract sensor data features that can be collected through Android OS without privacy concerns, and build a binary classifier of user states, \"normal\" and \"wayfinding\". Through the two field experiments with 17 and 104 subjects, we have confirmed that our classifier achieved the F-measure of 0.93 and 0.85, respectively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "While heading to a destination, we usually rely on our cognitive map constructed by audiovisual information from maps and our sight. However, errors or gaps between the real and our cognitive map often confuse us and lead to \"wayfinding\". In such a wayfinding state, we tend to take actions like wandering for perceiving errors and gathering information about surrounding environment. If such behavior can be detected by smartphones, we may design new applications on the smartphones, for instance, virtual \"concierge\" that timely helps us when we lose our ways. Also grasping spots where people are likely to lose their ways in large museums and theme parks would be useful to install or improve the signs and directions to support visitors. In this paper, we propose a method to detect individuals' wayfinding behavior from walking features by smartphone sensors. Based on the preliminary experiment, we extract sensor data features that can be collected through Android OS without privacy concerns, and build a binary classifier of user states, \"normal\" and \"wayfinding\". Through the two field experiments with 17 and 104 subjects, we have confirmed that our classifier achieved the F-measure of 0.93 and 0.85, respectively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While heading to a destination, we usually rely on our cognitive map constructed by audiovisual information from maps and our sight. However, errors or gaps between the real and our cognitive map often confuse us and lead to \"wayfinding\". In such a wayfinding state, we tend to take actions like wandering for perceiving errors and gathering information about surrounding environment. If such behavior can be detected by smartphones, we may design new applications on the smartphones, for instance, virtual \"concierge\" that timely helps us when we lose our ways. Also grasping spots where people are likely to lose their ways in large museums and theme parks would be useful to install or improve the signs and directions to support visitors. In this paper, we propose a method to detect individuals' wayfinding behavior from walking features by smartphone sensors. Based on the preliminary experiment, we extract sensor data features that can be collected through Android OS without privacy concerns, and build a binary classifier of user states, \"normal\" and \"wayfinding\". Through the two field experiments with 17 and 104 subjects, we have confirmed that our classifier achieved the F-measure of 0.93 and 0.85, respectively.",
"fno": "219501a488",
"keywords": [
"Mobile Computing",
"Smart Phones",
"Surrounding Environment",
"Smartphone Sensors",
"Sensor Data Features",
"User States",
"Wayfinding Behavior Detection",
"Cognitive Map",
"Audiovisual Information",
"Information Gathering",
"Android OS",
"Binary Classifier",
"F Measure",
"Legged Locomotion",
"Feature Extraction",
"Games",
"Data Mining",
"Sensor Phenomena And Characterization",
"Navigation",
"Wayfinding Behavior",
"Cognitive Map",
"Smartphone",
"VR"
],
"authors": [
{
"affiliation": null,
"fullName": "Ryosuke Narimoto",
"givenName": "Ryosuke",
"surname": "Narimoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shugo Kajita",
"givenName": "Shugo",
"surname": "Kajita",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hirozumi Yamaguchi",
"givenName": "Hirozumi",
"surname": "Yamaguchi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Teruo Higashino",
"givenName": "Teruo",
"surname": "Higashino",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aina",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-05-01T00:00:00",
"pubType": "proceedings",
"pages": "488-495",
"year": "2018",
"issn": "2332-5658",
"isbn": "978-1-5386-2195-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "219501a480",
"articleId": "12OmNBfIhcY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "219501a496",
"articleId": "12OmNxEByXG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2006/0225/0/02250003",
"title": "Evaluating Distributed Cognitive Resources for Wayfinding in a Desktop Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2006/02250003/12OmNBWi6MK",
"parentPublication": {
"id": "proceedings/3dui/2006/0225/0",
"title": "3D User Interfaces (3DUI'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candar/2015/9797/0/9797a463",
"title": "A Behavior Authentication Method Using Wi-Fi BSSIDs around Smartphone Carried by a User",
"doi": null,
"abstractUrl": "/proceedings-article/candar/2015/9797a463/12OmNCf1Dip",
"parentPublication": {
"id": "proceedings/candar/2015/9797/0",
"title": "2015 Third International Symposium on Computing and Networking (CANDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476608",
"title": "Poster: Evaluation of Wayfinding Aid Techniques in Multi-Level Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476608/12OmNsdo6rt",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2018/2659/0/265901a102",
"title": "Activity Detection and Analysis Using Smartphone Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2018/265901a102/12OmNvDI3KD",
"parentPublication": {
"id": "proceedings/iri/2018/2659/0",
"title": "2018 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2014/2736/0/06815158",
"title": "Self-calibration of walking speed estimations using smartphone sensors",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2014/06815158/12OmNvSbBwL",
"parentPublication": {
"id": "proceedings/percomw/2014/2736/0",
"title": "2014 IEEE International Conference on Pervasive Computing and Communication Workshops (PERCOM WORKSHOPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2012/1204/0/06184219",
"title": "HeatMeUp: A 3DUI serious game to explore collaborative wayfinding",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2012/06184219/12OmNxjjEj6",
"parentPublication": {
"id": "proceedings/3dui/2012/1204/0",
"title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2015/8425/0/07134104",
"title": "Towards detection of bad habits by fusing smartphone and smartwatch sensors",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2015/07134104/12OmNyYm2G9",
"parentPublication": {
"id": "proceedings/percomw/2015/8425/0",
"title": "2015 IEEE International Conference on Pervasive Computing and Communication Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2009/3965/0/04811223",
"title": "Poster: Vibration as a wayfinding aid",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2009/04811223/12OmNzw8j4W",
"parentPublication": {
"id": "proceedings/3dui/2009/3965/0",
"title": "2009 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08063899",
"title": "Automatic Optimization of Wayfinding Design",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08063899/13rRUwI5U7Z",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a596",
"title": "VR Wayfinding Training for People with Visual Impairment using VR Treadmill and VR Tracker",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a596/1CJf4aHcqoU",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx4gUtM",
"title": "2012 IEEE/ACIS 11th International Conference on Computer and Information Science (ICIS)",
"acronym": "icis",
"groupId": "1001200",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyjLoOR",
"doi": "10.1109/ICIS.2012.96",
"title": "Sensorimotor Representation of Space: Application in Autonomous Systems and in a Wayfinding Assistant for Alzheimer's Disease",
"normalizedTitle": "Sensorimotor Representation of Space: Application in Autonomous Systems and in a Wayfinding Assistant for Alzheimer's Disease",
"abstract": "We are interested in a bio-inspired representation of space that can be used for the design of autonomous systems and assistive devices. Experiments with subjects navigating in physically impossible environments indicate that the basic human representation of space cannot be map-like. Rather, it seems to be sensori motor in nature, combining sensory features with motor actions. We argue that such a sensori motor representation is important for the design of artificial systems that have to deal with spatial data. Here we demonstrate that this refers to both autonomous systems for spatial exploration and navigation and to systems for human assistance, in particular a navigation assistance and training system for people with Alzheimer's disease (AD). Disorientation and getting lost behavior are early signs of AD in most patients. The proposed system will focus on cognitive deficits associated with the disease by using and training skills that are still likely to be intact in the individual person. By means of guiding the attention to salient and autobiographically important landmarks, and practicing routes between them, the system will help to build up and train a more resilient sensori-motor representation. The user interface focuses on combining the visual scenes of the incorporated landmarks with a simplified map of the surrounding area.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We are interested in a bio-inspired representation of space that can be used for the design of autonomous systems and assistive devices. Experiments with subjects navigating in physically impossible environments indicate that the basic human representation of space cannot be map-like. Rather, it seems to be sensori motor in nature, combining sensory features with motor actions. We argue that such a sensori motor representation is important for the design of artificial systems that have to deal with spatial data. Here we demonstrate that this refers to both autonomous systems for spatial exploration and navigation and to systems for human assistance, in particular a navigation assistance and training system for people with Alzheimer's disease (AD). Disorientation and getting lost behavior are early signs of AD in most patients. The proposed system will focus on cognitive deficits associated with the disease by using and training skills that are still likely to be intact in the individual person. By means of guiding the attention to salient and autobiographically important landmarks, and practicing routes between them, the system will help to build up and train a more resilient sensori-motor representation. The user interface focuses on combining the visual scenes of the incorporated landmarks with a simplified map of the surrounding area.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We are interested in a bio-inspired representation of space that can be used for the design of autonomous systems and assistive devices. Experiments with subjects navigating in physically impossible environments indicate that the basic human representation of space cannot be map-like. Rather, it seems to be sensori motor in nature, combining sensory features with motor actions. We argue that such a sensori motor representation is important for the design of artificial systems that have to deal with spatial data. Here we demonstrate that this refers to both autonomous systems for spatial exploration and navigation and to systems for human assistance, in particular a navigation assistance and training system for people with Alzheimer's disease (AD). Disorientation and getting lost behavior are early signs of AD in most patients. The proposed system will focus on cognitive deficits associated with the disease by using and training skills that are still likely to be intact in the individual person. By means of guiding the attention to salient and autobiographically important landmarks, and practicing routes between them, the system will help to build up and train a more resilient sensori-motor representation. The user interface focuses on combining the visual scenes of the incorporated landmarks with a simplified map of the surrounding area.",
"fno": "06211100",
"keywords": [
"Computer Based Training",
"Diseases",
"Handicapped Aids",
"Health Care",
"Human Computer Interaction",
"Multi Agent Systems",
"User Interfaces",
"Space Sensorimotor Representation",
"Autonomous Systems",
"Wayfinding Assistant",
"Alzheimer Disease",
"Assistive Devices",
"Bio Inspired Representation",
"Sensory Features",
"Motor Actions",
"Artificial System Design",
"Spatial Exploration",
"Spatial Navigation",
"Navigation Assistance",
"Training System",
"User Interface",
"Visual Scenes",
"Navigation",
"Alzheimers Disease",
"Visualization",
"Virtual Environments",
"Training",
"Buildings",
"Assistance System",
"Alzheimer",
"Navigation",
"Spatial Cognition",
"Sensorimotor",
"Dementia",
"Virtual Reality",
"Disorientation"
],
"authors": [
{
"affiliation": null,
"fullName": "Christoph Zetzsche",
"givenName": "Christoph",
"surname": "Zetzsche",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Torben Gerkensmeyer",
"givenName": "Torben",
"surname": "Gerkensmeyer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Falko Schmid",
"givenName": "Falko",
"surname": "Schmid",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kerstin Schill",
"givenName": "Kerstin",
"surname": "Schill",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-05-01T00:00:00",
"pubType": "proceedings",
"pages": "219-224",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-1536-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06211099",
"articleId": "12OmNwEJ0Hj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06211101",
"articleId": "12OmNAXxWTJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/prni/2012/4765/0/4765a105",
"title": "A Composite Multivariate Polygenic and Neuroimaging Score for Prediction of Conversion to Alzheimer's Disease",
"doi": null,
"abstractUrl": "/proceedings-article/prni/2012/4765a105/12OmNAqCtPR",
"parentPublication": {
"id": "proceedings/prni/2012/4765/0",
"title": "Pattern Recognition in NeuroImaging, IEEE International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmlc/2010/6006/0/05460738",
"title": "Classification of Alzheimer's Disease and Parkinson's Disease by Using Machine Learning and Neural Network Methods",
"doi": null,
"abstractUrl": "/proceedings-article/icmlc/2010/05460738/12OmNBOCWat",
"parentPublication": {
"id": "proceedings/icmlc/2010/6006/0",
"title": "2nd International Conference on Machine Learning and Computing (ICMLC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118d089",
"title": "Matrix-Similarity Based Loss Function and Feature Selection for Alzheimer's Disease Diagnosis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118d089/12OmNBrlPwl",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881303",
"title": "Initial Results in Alzheimer's Disease Progression Modeling Using Imputed Health State Profiles",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881303/12OmNCcKQCg",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a822",
"title": "Assessing Graph Properties and Dynamics of the Functional Brain Networks in Alzheimer's Disease",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a822/12OmNCu4nfi",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2014/5880/0/07051901",
"title": "Analyzing Alzheimer's disease gene expression dataset using clustering and association rule mining",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2014/07051901/12OmNxX3uHY",
"parentPublication": {
"id": "proceedings/iri/2014/5880/0",
"title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2018/6217/0/247100a041",
"title": "[Regular Paper] Texture Biomarkers of Alzheimer's Disease and Disease Progression in the Mouse Retina",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2018/247100a041/17D45Vu1Ty5",
"parentPublication": {
"id": "proceedings/bibe/2018/6217/0",
"title": "2018 IEEE 18th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000b962",
"title": "Early Diagnosis of Alzheimer's Disease: A Neuroimaging Study with Deep Learning Architectures",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000b962/17D45WrVg24",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-smartcity-dss/2018/6614/0/661400b455",
"title": "Effective Use of Data Science Toward Early Prediction of Alzheimer's Disease",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2018/661400b455/183rAe9yiLq",
"parentPublication": {
"id": "proceedings/hpcc-smartcity-dss/2018/6614/0",
"title": "2018 IEEE 20th International Conference on High Performance Computing and Communications; IEEE 16th International Conference on Smart City; IEEE 4th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2018/6956/0/695600a248",
"title": "Classification of Alzheimer's Disease Based on Stacked Denoising Autoencoder",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2018/695600a248/1dUo29kv0CA",
"parentPublication": {
"id": "proceedings/icnisc/2018/6956/0",
"title": "2018 4th Annual International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJfs97XQhq",
"doi": "10.1109/VRW55335.2022.00327",
"title": "[DC] Effects of Asymmetric Locomotion Methods on Collaborative Navigation and Wayfinding in Shared Virtual Environments",
"normalizedTitle": "[DC] Effects of Asymmetric Locomotion Methods on Collaborative Navigation and Wayfinding in Shared Virtual Environments",
"abstract": "Navigation and wayfinding can be accomplished either by single person or a group of people. Using the help of immersive virtual reality technology, significant research has been conducted to find out how a person can navigate and wayfind in a virtual world. However, there has been little work done that asks how multiple people can collaboratively navigate and wayfind in a virtual world. In this proposal, we investigate this question with a specific interest on how different locomotion methods can affect the acquired knowledge of a group of individuals in a distributed, shared virtual environment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Navigation and wayfinding can be accomplished either by single person or a group of people. Using the help of immersive virtual reality technology, significant research has been conducted to find out how a person can navigate and wayfind in a virtual world. However, there has been little work done that asks how multiple people can collaboratively navigate and wayfind in a virtual world. In this proposal, we investigate this question with a specific interest on how different locomotion methods can affect the acquired knowledge of a group of individuals in a distributed, shared virtual environment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Navigation and wayfinding can be accomplished either by single person or a group of people. Using the help of immersive virtual reality technology, significant research has been conducted to find out how a person can navigate and wayfind in a virtual world. However, there has been little work done that asks how multiple people can collaboratively navigate and wayfind in a virtual world. In this proposal, we investigate this question with a specific interest on how different locomotion methods can affect the acquired knowledge of a group of individuals in a distributed, shared virtual environment.",
"fno": "840200a952",
"keywords": [
"Virtual Reality",
"Asymmetric Locomotion Methods",
"Collaborative Navigation",
"Wayfinding",
"Shared Virtual Environments",
"Single Person",
"Immersive Virtual Reality Technology",
"Virtual World",
"Distributed Shared Virtual Environment",
"Three Dimensional Displays",
"Navigation",
"Conferences",
"Virtual Environments",
"Collaboration",
"User Interfaces",
"Hardware",
"Virtual Reality",
"Locomotion Methods",
"Collaborative Navigation",
"Distributed Virtual Environments"
],
"authors": [
{
"affiliation": "Vanderbilt University,USA",
"fullName": "Soumyajit Chakraborty",
"givenName": "Soumyajit",
"surname": "Chakraborty",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "952-953",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a950",
"articleId": "1CJefBkSOY0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a954",
"articleId": "1CJfdg0rEyY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223342",
"title": "Wayfinding by auditory cues in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223342/12OmNB8TU5g",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2006/0225/0/02250003",
"title": "Evaluating Distributed Cognitive Resources for Wayfinding in a Desktop Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2006/02250003/12OmNBWi6MK",
"parentPublication": {
"id": "proceedings/3dui/2006/0225/0",
"title": "3D User Interfaces (3DUI'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2018/2195/0/219501a488",
"title": "Wayfinding Behavior Detection by Smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2018/219501a488/12OmNxWcH87",
"parentPublication": {
"id": "proceedings/aina/2018/2195/0",
"title": "2018 IEEE 32nd International Conference on Advanced Information Networking and Applications (AINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08063899",
"title": "Automatic Optimization of Wayfinding Design",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08063899/13rRUwI5U7Z",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/07/06109251",
"title": "The Design and Evaluation of a Large-Scale Real-Walking Locomotion Interface",
"doi": null,
"abstractUrl": "/journal/tg/2012/07/06109251/13rRUygT7mV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a930",
"title": "[DC] Leveraging AR Cues towards New Navigation Assistant Paradigm",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a930/1CJcTykeypq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a019",
"title": "Behind the Curtains: Comparing Mozilla Hubs with Microsoft Teams in a Guided Virtual Theatre Experience",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a019/1CJd5FkAKas",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089501",
"title": "Dyadic Acquisition of Survey Knowledge in a Shared Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089501/1jIx8OgJaoM",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090399",
"title": "Map Displays And Landmark Effects On Wayfinding In Unfamiliar Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090399/1jIxkn3znZC",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a493",
"title": "Evaluation of Body-centric Locomotion with Different Transfer Functions in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a493/1tuBnu6n9jq",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tROFXZKX3q",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"acronym": "percom-workshops",
"groupId": "1000552",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tROJgKMUqQ",
"doi": "10.1109/PerComWorkshops51409.2021.9431007",
"title": "PathLookup: A Deep Learning-Based Framework to Assist Visually Impaired in Outdoor Wayfinding",
"normalizedTitle": "PathLookup: A Deep Learning-Based Framework to Assist Visually Impaired in Outdoor Wayfinding",
"abstract": "Reading and following visual signs remains the predominant mechanism for navigation and receiving wayfinding information in areas without accurate GPS coverage. This puts people who are blind or visually impaired (BVI) at a great disadvantage. There still remains a great need to provide a low-cost, easy to use, and reliable auxiliary wayfinding system within indoor and outdoor spaces that complements existing satellite-based systems. Through both a user study and a quantitative study of GPS accuracies in outdoor environments, this paper highlights the need for auxiliary outdoor wayfinding tools for people with visual impairments. A deep learning-based image localization framework called PathLookup is proposed in this work for accurately providing path advancement information for outdoor wayfinding. Evaluation results show PathLookup to be highly accurate and fast potentially proving to be a valuable tool for future integration into outdoor wayfinding systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Reading and following visual signs remains the predominant mechanism for navigation and receiving wayfinding information in areas without accurate GPS coverage. This puts people who are blind or visually impaired (BVI) at a great disadvantage. There still remains a great need to provide a low-cost, easy to use, and reliable auxiliary wayfinding system within indoor and outdoor spaces that complements existing satellite-based systems. Through both a user study and a quantitative study of GPS accuracies in outdoor environments, this paper highlights the need for auxiliary outdoor wayfinding tools for people with visual impairments. A deep learning-based image localization framework called PathLookup is proposed in this work for accurately providing path advancement information for outdoor wayfinding. Evaluation results show PathLookup to be highly accurate and fast potentially proving to be a valuable tool for future integration into outdoor wayfinding systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Reading and following visual signs remains the predominant mechanism for navigation and receiving wayfinding information in areas without accurate GPS coverage. This puts people who are blind or visually impaired (BVI) at a great disadvantage. There still remains a great need to provide a low-cost, easy to use, and reliable auxiliary wayfinding system within indoor and outdoor spaces that complements existing satellite-based systems. Through both a user study and a quantitative study of GPS accuracies in outdoor environments, this paper highlights the need for auxiliary outdoor wayfinding tools for people with visual impairments. A deep learning-based image localization framework called PathLookup is proposed in this work for accurately providing path advancement information for outdoor wayfinding. Evaluation results show PathLookup to be highly accurate and fast potentially proving to be a valuable tool for future integration into outdoor wayfinding systems.",
"fno": "09431007",
"keywords": [
"Deep Learning Artificial Intelligence",
"Global Positioning System",
"Handicapped Aids",
"Image Processing",
"Mobile Computing",
"Navigation",
"Traffic Engineering Computing",
"Navigation",
"Wayfinding Information",
"Satellite Based Systems",
"GPS Accuracies",
"Visual Impairments",
"Deep Learning Based Image Localization Framework",
"Path Lookup",
"Path Advancement Information",
"Outdoor Wayfinding Systems",
"Pervasive Computing",
"Location Awareness",
"Visualization",
"Navigation",
"Conferences",
"Tools",
"Reliability",
"Accessibility Technologies",
"Outdoor Wayfinding",
"Deep Learning",
"Computer Vision",
"Visually Impaired Persons"
],
"authors": [
{
"affiliation": "The College of New Jersey,Department of Computer Science,Ewing,NJ,USA,08628",
"fullName": "Uddipan Das",
"givenName": "Uddipan",
"surname": "Das",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Wichita State University,Department of EECS,Wichita,KS,USA,67260",
"fullName": "Vinod Namboodiri",
"givenName": "Vinod",
"surname": "Namboodiri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Wichita State University,Department of EECS,Wichita,KS,USA,67260",
"fullName": "Hongsheng He",
"givenName": "Hongsheng",
"surname": "He",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "percom-workshops",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "111-116",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0424-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09431076",
"articleId": "1tROTV4SeQ0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09431116",
"articleId": "1tROSuD7CXC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2005/2660/0/237230030",
"title": "Digital Sign System for Indoor Wayfinding for the Visually Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2005/237230030/12OmNBoNrkV",
"parentPublication": {
"id": "proceedings/cvprw/2005/2660/0",
"title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05) - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iitsi/2009/3579/0/3579a042",
"title": "A Rapid Acquisition Algorithm of WSN-aided GPS Location",
"doi": null,
"abstractUrl": "/proceedings-article/iitsi/2009/3579a042/12OmNwHQB93",
"parentPublication": {
"id": "proceedings/iitsi/2009/3579/0",
"title": "Intelligent Information Technology and Security Informatics, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgciot/2015/7910/0/07380444",
"title": "Analysis of computer vision and sensor technologies to assist the visually impaired",
"doi": null,
"abstractUrl": "/proceedings-article/icgciot/2015/07380444/12OmNyvoXhw",
"parentPublication": {
"id": "proceedings/icgciot/2015/7910/0",
"title": "2015 International Conference on Green Computing and Internet of Things (ICGCIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/seus-wccia/2006/2560/0/25600037",
"title": "An Architecture for Providing Java Applications with Indoor and Outdoor Hybrid Location Sensing",
"doi": null,
"abstractUrl": "/proceedings-article/seus-wccia/2006/25600037/12OmNzayNgw",
"parentPublication": {
"id": "proceedings/seus-wccia/2006/2560/0",
"title": "Software Technologies for Future Embedded and Ubiquitous Systems, and International Workshop on Collaborative Computing, Integration, and Assurance, The IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlsid/2017/5740/0/5740a213",
"title": "MAVI: An Embedded Device to Assist Mobility of Visually Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/vlsid/2017/5740a213/12OmNzayNjK",
"parentPublication": {
"id": "proceedings/vlsid/2017/5740/0",
"title": "2017 30th International Conference on VLSI Design and 2017 16th International Conference on Embedded Systems (VLSID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a210",
"title": "A Comparative Analysis of Visual-Inertial SLAM for Assisted Wayfinding of the Visually Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a210/18j8P4rWFdm",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2019/5584/0/558400a936",
"title": "Indoor Electronic Traveling Aids for Visually Impaired: Systemic Review",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2019/558400a936/1jdDS4l7A3u",
"parentPublication": {
"id": "proceedings/csci/2019/5584/0",
"title": "2019 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a051",
"title": "Exploring Virtual Environments by Visually Impaired Using a Mixed Reality Cane Without Visual Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a051/1pBMgh7AbaU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2021/0424/0/09431138",
"title": "CityGuide: A Seamless Indoor-Outdoor Wayfinding System for People With Vision Impairments",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2021/09431138/1tROZUVGeu4",
"parentPublication": {
"id": "proceedings/percom-workshops/2021/0424/0",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100b718",
"title": "Audi-Exchange: AI-Guided Hand-based Actions to Assist Human-Human Interactions for the Blind and the Visually Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100b718/1yNiwUbbppS",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tROFXZKX3q",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"acronym": "percom-workshops",
"groupId": "1000552",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tROZUVGeu4",
"doi": "10.1109/PerComWorkshops51409.2021.9431138",
"title": "CityGuide: A Seamless Indoor-Outdoor Wayfinding System for People With Vision Impairments",
"normalizedTitle": "CityGuide: A Seamless Indoor-Outdoor Wayfinding System for People With Vision Impairments",
"abstract": "GPS accuracy is poor in indoor environments and around buildings. Thus, reading and following signs still remains the most common mechanism for providing and receiving wayfinding information in such spaces. This puts individuals who are blind or visually impaired (BVI) at a great disadvantage, and thus, there remains a great need to provide a low-cost, easy to use, and reliable wayfinding system within indoor and outdoor spaces that complements existing satellite-based systems. This work designs, implements, and evaluates a wayfinding system and smartphone application called CityGuide that can be used by BVI individuals to navigate their surroundings beyond what is possible with just a GPS-based system. CityGuide enables an individual to query and get turn-by-turn shortest route directions from an indoor location to an outdoor location. CityGuide leverages recently developed indoor wayfinding solutions in conjunction with GPS signals to provide a seamless indoor-outdoor navigation and wayfinding system that guides a BVI individual to their desired destination through the shortest route. Evaluations of CityGuide with BVI human subjects navigating between an indoor starting point to an outdoor destination within an unfamiliar university campus scenario showed it to be effective in reducing end-to-end navigation times and distances of almost all participants.",
"abstracts": [
{
"abstractType": "Regular",
"content": "GPS accuracy is poor in indoor environments and around buildings. Thus, reading and following signs still remains the most common mechanism for providing and receiving wayfinding information in such spaces. This puts individuals who are blind or visually impaired (BVI) at a great disadvantage, and thus, there remains a great need to provide a low-cost, easy to use, and reliable wayfinding system within indoor and outdoor spaces that complements existing satellite-based systems. This work designs, implements, and evaluates a wayfinding system and smartphone application called CityGuide that can be used by BVI individuals to navigate their surroundings beyond what is possible with just a GPS-based system. CityGuide enables an individual to query and get turn-by-turn shortest route directions from an indoor location to an outdoor location. CityGuide leverages recently developed indoor wayfinding solutions in conjunction with GPS signals to provide a seamless indoor-outdoor navigation and wayfinding system that guides a BVI individual to their desired destination through the shortest route. Evaluations of CityGuide with BVI human subjects navigating between an indoor starting point to an outdoor destination within an unfamiliar university campus scenario showed it to be effective in reducing end-to-end navigation times and distances of almost all participants.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "GPS accuracy is poor in indoor environments and around buildings. Thus, reading and following signs still remains the most common mechanism for providing and receiving wayfinding information in such spaces. This puts individuals who are blind or visually impaired (BVI) at a great disadvantage, and thus, there remains a great need to provide a low-cost, easy to use, and reliable wayfinding system within indoor and outdoor spaces that complements existing satellite-based systems. This work designs, implements, and evaluates a wayfinding system and smartphone application called CityGuide that can be used by BVI individuals to navigate their surroundings beyond what is possible with just a GPS-based system. CityGuide enables an individual to query and get turn-by-turn shortest route directions from an indoor location to an outdoor location. CityGuide leverages recently developed indoor wayfinding solutions in conjunction with GPS signals to provide a seamless indoor-outdoor navigation and wayfinding system that guides a BVI individual to their desired destination through the shortest route. Evaluations of CityGuide with BVI human subjects navigating between an indoor starting point to an outdoor destination within an unfamiliar university campus scenario showed it to be effective in reducing end-to-end navigation times and distances of almost all participants.",
"fno": "09431138",
"keywords": [
"Global Positioning System",
"Handicapped Aids",
"Mobile Computing",
"Smart Phones",
"Traffic Engineering Computing",
"Seamless Indoor Outdoor Wayfinding System",
"Vision Impairments",
"GPS Accuracy",
"Indoor Environments",
"Common Mechanism",
"Providing Receiving Wayfinding Information",
"Great Disadvantage",
"Reliable Wayfinding System",
"Indoor Spaces",
"Outdoor Spaces",
"Satellite Based Systems",
"BVI Individual",
"GPS Based System",
"Turn By Turn Shortest Route Directions",
"Indoor Location",
"Outdoor Location",
"Indoor Wayfinding Solutions",
"GPS Signals",
"Indoor Outdoor Navigation",
"BVI Human Subjects",
"Indoor Starting Point",
"Outdoor Destination",
"City Guide Leverage",
"Pervasive Computing",
"Navigation",
"Conferences",
"Buildings",
"Indoor Environment",
"Reliability",
"Global Positioning System",
"Navigation And Wayfinding",
"Accessibility",
"Vision Impairments"
],
"authors": [
{
"affiliation": "Smith-Kettlewell Eye Research Institute,San Francisco,CA,USA",
"fullName": "Seyed Ali Cheraghi",
"givenName": "Seyed Ali",
"surname": "Cheraghi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Wichita State University,Wichita,KS,USA",
"fullName": "Vinod Namboodiri",
"givenName": "Vinod",
"surname": "Namboodiri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Envision Research Institute,Wichita,KS,USA",
"fullName": "Güler Arsal",
"givenName": "Güler",
"surname": "Arsal",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "percom-workshops",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "105-110",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0424-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09430931",
"articleId": "1tROWRRHcZO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09431122",
"articleId": "1tROPQhWvRK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/spacomm/2009/3694/0/3694a141",
"title": "Indoor/Outdoor Seamless Positioning Technologies Integrated on Smart Phone",
"doi": null,
"abstractUrl": "/proceedings-article/spacomm/2009/3694a141/12OmNAtK4ng",
"parentPublication": {
"id": "proceedings/spacomm/2009/3694/0",
"title": "Advances in Satellite and Space Communications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2018/2195/0/219501a550",
"title": "Indoor Trajectory Reconstruction Using Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2018/219501a550/12OmNBBQZkc",
"parentPublication": {
"id": "proceedings/aina/2018/2195/0",
"title": "2018 IEEE 32nd International Conference on Advanced Information Networking and Applications (AINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0/07847066",
"title": "An Indoor and Outdoor Seamless Positioning System Based on Android Platform",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom-bigdatase-i-spa/2016/07847066/12OmNqIQSgl",
"parentPublication": {
"id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0",
"title": "2016 IEEE Trustcom/BigDataSE/ISPA",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icns/2009/3586/0/3586a370",
"title": "Global System for Location and Guidance of Disabled People: Indoor and Outdoor Technologies Integration",
"doi": null,
"abstractUrl": "/proceedings-article/icns/2009/3586a370/12OmNroijbv",
"parentPublication": {
"id": "proceedings/icns/2009/3586/0",
"title": "2009 Fifth International Conference on Networking and Services",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2009/3650/0/3650a267",
"title": "Seamless Indoor/Outdoor Positioning Handover for Location-Based Services in Streamspin",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2009/3650a267/12OmNvT2p3T",
"parentPublication": {
"id": "proceedings/mdm/2009/3650/0",
"title": "2009 Tenth International Conference on Mobile Data Management: Systems, Services and Middleware",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imis/2012/4684/0/4684a163",
"title": "Characterization of Vision-Aided Indoor Localization and Landmark Routing",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2012/4684a163/12OmNwudQSo",
"parentPublication": {
"id": "proceedings/imis/2012/4684/0",
"title": "Innovative Mobile and Internet Services in Ubiquitous Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2016/2020/0/07498358",
"title": "Indoor data management",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2016/07498358/12OmNx3HI8h",
"parentPublication": {
"id": "proceedings/icde/2016/2020/0",
"title": "2016 IEEE 32nd International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2011/1189/0/05999159",
"title": "Self-adaptive application for indoor wayfinding for individuals with cognitive impairments",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2011/05999159/12OmNx7ouXD",
"parentPublication": {
"id": "proceedings/cbms/2011/1189/0",
"title": "2011 24th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0/08726714",
"title": "High-Precision Indoor Localization Based on RFID and Stepscan Floor Tiles",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2018/08726714/1axfckLEVPi",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0",
"title": "2018 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2021/0424/0/09431007",
"title": "PathLookup: A Deep Learning-Based Framework to Assist Visually Impaired in Outdoor Wayfinding",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2021/09431007/1tROJgKMUqQ",
"parentPublication": {
"id": "proceedings/percom-workshops/2021/0424/0",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1wutzGkF9Zu",
"title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)",
"acronym": "iciddt",
"groupId": "1841164",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1wutF8bikAo",
"doi": "10.1109/ICIDDT52279.2020.00094",
"title": "The influence of slope-based navigation interface on cognition and experience in complex mountain cities",
"normalizedTitle": "The influence of slope-based navigation interface on cognition and experience in complex mountain cities",
"abstract": "Users tend to lose their sense of direction in complex mountain cities while using navigation software. In a real environment, people can apply multiple slope of perception (e.g. vision, kinesthesia and vestibular sensation) to perceive the existence of slopes [1]. Based on the concept of user perceivable slope, this study explores whether the slope information can improve the user’s experience of road finding by putting it on the interface of mobile navigation software.This study is aimed to discuss the effects of slope information used in navigation on complex slopy roads. Two hypotheses are proposed: (1) The interface with slope information will improve the efficiency of wayfinding; (2) The slope information will stimulate the multi-interaction experience and help users (pedestrians) to build stereo-space cognition. Based on those hypotheses, a control experiment using a slope information navigation interface was designed. In the experiment, 30 participants were assigned with either the original navigation software or a navigation software interface with slope information for pathfinding. It is found that the participants who use slope information navigation have higher three-dimensional cognitive ability, so they have better confidence in road finding and user experience.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Users tend to lose their sense of direction in complex mountain cities while using navigation software. In a real environment, people can apply multiple slope of perception (e.g. vision, kinesthesia and vestibular sensation) to perceive the existence of slopes [1]. Based on the concept of user perceivable slope, this study explores whether the slope information can improve the user’s experience of road finding by putting it on the interface of mobile navigation software.This study is aimed to discuss the effects of slope information used in navigation on complex slopy roads. Two hypotheses are proposed: (1) The interface with slope information will improve the efficiency of wayfinding; (2) The slope information will stimulate the multi-interaction experience and help users (pedestrians) to build stereo-space cognition. Based on those hypotheses, a control experiment using a slope information navigation interface was designed. In the experiment, 30 participants were assigned with either the original navigation software or a navigation software interface with slope information for pathfinding. It is found that the participants who use slope information navigation have higher three-dimensional cognitive ability, so they have better confidence in road finding and user experience.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Users tend to lose their sense of direction in complex mountain cities while using navigation software. In a real environment, people can apply multiple slope of perception (e.g. vision, kinesthesia and vestibular sensation) to perceive the existence of slopes [1]. Based on the concept of user perceivable slope, this study explores whether the slope information can improve the user’s experience of road finding by putting it on the interface of mobile navigation software.This study is aimed to discuss the effects of slope information used in navigation on complex slopy roads. Two hypotheses are proposed: (1) The interface with slope information will improve the efficiency of wayfinding; (2) The slope information will stimulate the multi-interaction experience and help users (pedestrians) to build stereo-space cognition. Based on those hypotheses, a control experiment using a slope information navigation interface was designed. In the experiment, 30 participants were assigned with either the original navigation software or a navigation software interface with slope information for pathfinding. It is found that the participants who use slope information navigation have higher three-dimensional cognitive ability, so they have better confidence in road finding and user experience.",
"fno": "036700a471",
"keywords": [
"Cognition",
"Geographic Information Systems",
"Mobile Computing",
"Navigation",
"Pedestrians",
"Traffic Information Systems",
"User Experience",
"User Interfaces",
"Complex Mountain Cities",
"User Perceivable Slope",
"Mobile Navigation Software",
"Slope Information",
"User Experience",
"Slope Based Navigation Interface",
"Road Finding",
"Wayfinding Efficiency",
"Multi Interaction Experience",
"Pedestrians",
"Stereo Space Cognition",
"Technological Innovation",
"Navigation",
"Roads",
"Urban Areas",
"Software",
"Cognition",
"User Experience",
"Navigation",
"Interface",
"User Experience",
"Cognitive Map",
"Slope"
],
"authors": [
{
"affiliation": "School of Art Jiangsu University,Zhenjiang,Jiangsu",
"fullName": "Hong Zhang",
"givenName": "Hong",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Art Jiangsu University,Zhenjiang,Jiangsu",
"fullName": "Rong Han",
"givenName": "Rong",
"surname": "Han",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iciddt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "471-477",
"year": "2020",
"issn": null,
"isbn": "978-1-6654-0367-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "036700a466",
"articleId": "1wutMzncItq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "036700a478",
"articleId": "1wutzOB17LW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icisce/2017/3013/0/3013a729",
"title": "The Research of Intelligent Traffic Congestion Pricing Technology Based on BaiDou Navigation and Token Payment",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2017/3013a729/12OmNAZOJXF",
"parentPublication": {
"id": "proceedings/icisce/2017/3013/0",
"title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2016/8985/0/8985a797",
"title": "Developing a Transportation Support System for Vulnerable Road Users in Local Community",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2016/8985a797/12OmNCvcLGb",
"parentPublication": {
"id": "proceedings/iiai-aai/2016/8985/0",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457d087",
"title": "DeepNav: Learning to Navigate Large Cities",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d087/12OmNx1IwfP",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccicc/2016/3846/0/07862066",
"title": "Techniques for cognition of driving context for safe driving application",
"doi": null,
"abstractUrl": "/proceedings-article/iccicc/2016/07862066/12OmNyPQ4BV",
"parentPublication": {
"id": "proceedings/iccicc/2016/3846/0",
"title": "2016 IEEE 15th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2017/0621/0/0621a005",
"title": "Design and Development of a Web Service to Support Vulnerable Road User's Daily Life in Suburban Residential Estates in Hiroshima City",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2017/0621a005/12OmNz61dum",
"parentPublication": {
"id": "proceedings/iiai-aai/2017/0621/0",
"title": "2017 6th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09861719",
"title": "GoComfort: Comfortable Navigation for Autonomous Vehicles Leveraging High-Precision Road Damage Crowdsensing",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09861719/1FWhWjWtWfu",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300m2530",
"title": "TOUCHDOWN: Natural Language Navigation and Spatial Reasoning in Visual Street Environments",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300m2530/1gyrycxdSkE",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a214",
"title": "Investigating aesthetics to afford more ‘felt’ knowledge and ‘meaningful’ navigation interface designs",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a214/1rSR8vAIb9C",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2021/0424/0/09430997",
"title": "Considering Spatial Cognition of Blind Travelers in Utilizing Augmented Reality for Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2021/09430997/1tROV9fTS7u",
"parentPublication": {
"id": "proceedings/percom-workshops/2021/0424/0",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2021/3960/0/396000a153",
"title": "Numerical Stability Analysis of Baota Mountain in Yan'an City",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2021/396000a153/1xqyQAu6o1O",
"parentPublication": {
"id": "proceedings/icceai/2021/3960/0",
"title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBSBk6z",
"title": "2014 5th International Conference on Computing, Communication and Networking Technologies (ICCCNT)",
"acronym": "icccnt",
"groupId": "1802177",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAo45Pd",
"doi": "10.1109/ICCCNT.2014.6963044",
"title": "On minimum covering of invertible honeycomb meshes",
"normalizedTitle": "On minimum covering of invertible honeycomb meshes",
"abstract": "The problem of monitoring a network by placing a minimum number of sensor devices in the system is modelled as the vertex covering problem (VCP) in graphs. A set S of vertices of a graph G = (V, E) is called a vertex cover, if each edge in E has at least one end point in S and the minimum cardinality taken over all vertex covering sets of G is called the vertex covering number denoted by β(G). This concept has also wide applications in wireless sensor networks and in routing and fault tolerance algorithms. This paper presents the exact values of the vertex covering, edge covering and inverse covering numbers of a popular mesh-derived parallel architecture called the honeycomb mesh network. In particular, we present a characterization for invertible graphs and have shown its significance in electrical networks. In addition, a polynomial time algorithm is provided to find the minimum covering sets in honeycomb meshes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The problem of monitoring a network by placing a minimum number of sensor devices in the system is modelled as the vertex covering problem (VCP) in graphs. A set S of vertices of a graph G = (V, E) is called a vertex cover, if each edge in E has at least one end point in S and the minimum cardinality taken over all vertex covering sets of G is called the vertex covering number denoted by β(G). This concept has also wide applications in wireless sensor networks and in routing and fault tolerance algorithms. This paper presents the exact values of the vertex covering, edge covering and inverse covering numbers of a popular mesh-derived parallel architecture called the honeycomb mesh network. In particular, we present a characterization for invertible graphs and have shown its significance in electrical networks. In addition, a polynomial time algorithm is provided to find the minimum covering sets in honeycomb meshes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The problem of monitoring a network by placing a minimum number of sensor devices in the system is modelled as the vertex covering problem (VCP) in graphs. A set S of vertices of a graph G = (V, E) is called a vertex cover, if each edge in E has at least one end point in S and the minimum cardinality taken over all vertex covering sets of G is called the vertex covering number denoted by β(G). This concept has also wide applications in wireless sensor networks and in routing and fault tolerance algorithms. This paper presents the exact values of the vertex covering, edge covering and inverse covering numbers of a popular mesh-derived parallel architecture called the honeycomb mesh network. In particular, we present a characterization for invertible graphs and have shown its significance in electrical networks. In addition, a polynomial time algorithm is provided to find the minimum covering sets in honeycomb meshes.",
"fno": "06963044",
"keywords": [
"Mesh Networks",
"Approximation Methods",
"Vegetation",
"Monitoring",
"Polynomials",
"Approximation Algorithms",
"Bridges",
"Honeycomb Mesh",
"Vertex Cover",
"Edge Cover",
"Invertible Graphs"
],
"authors": [
{
"affiliation": "Sathyabama University, Chennai-600119, India",
"fullName": "D. Angel",
"givenName": "D.",
"surname": "Angel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sathyabama University, Chennai-600119, India",
"fullName": "A. Amutha",
"givenName": "A.",
"surname": "Amutha",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icccnt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-2696-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06963043",
"articleId": "12OmNzC5SL1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06963045",
"articleId": "12OmNCmpcLV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/candar/2014/4152/0/4152a068",
"title": "On Vertex Cover with Fractional Fan-Out Bound",
"doi": null,
"abstractUrl": "/proceedings-article/candar/2014/4152a068/12OmNBSBjZz",
"parentPublication": {
"id": "proceedings/candar/2014/4152/0",
"title": "2014 Second International Symposium on Computing and Networking (CANDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccc/2013/4997/0/4997a207",
"title": "Covering CSPs",
"doi": null,
"abstractUrl": "/proceedings-article/ccc/2013/4997a207/12OmNBlFQYq",
"parentPublication": {
"id": "proceedings/ccc/2013/4997/0",
"title": "2013 IEEE Conference on Computational Complexity (CCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fit/2015/9666/0/9666a277",
"title": "Clever Steady Strategy Algorithm: A Simple and Efficient Approximation Algorithm for Minimum Vertex Cover Problem",
"doi": null,
"abstractUrl": "/proceedings-article/fit/2015/9666a277/12OmNBqv2jS",
"parentPublication": {
"id": "proceedings/fit/2015/9666/0",
"title": "2015 13th International Conference on Frontiers of Information Technology (FIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cicn/2014/6929/0/6929a618",
"title": "An Improved Greedy Heuristic for Unweighted Minimum Vertex Cover",
"doi": null,
"abstractUrl": "/proceedings-article/cicn/2014/6929a618/12OmNCvLXZa",
"parentPublication": {
"id": "proceedings/cicn/2014/6929/0",
"title": "2014 International Conference on Computational Intelligence and Communication Networks (CICN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2014/7981/0/7981b134",
"title": "On the Minimum Hub Set Problem",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981b134/12OmNwDACde",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/2/3571b432",
"title": "Minimum Sphere Covering Theory in High Dimension Space and Its Application for Speech Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571b432/12OmNwF0BQE",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/2",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2012/5680/0/06495325",
"title": "Nonexistences of the Packing and Covering Designs for the Join Graph of K1 and C4 with a Pendent Edge",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2012/06495325/12OmNylKARG",
"parentPublication": {
"id": "proceedings/isise/2012/5680/0",
"title": "2012 Fourth International Symposium on Information Science and Engineering (ISISE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2013/09/ttc2013091684",
"title": "An Eight-Approximation Algorithm for Computing Rooted Three-Vertex Connected Minimum Steiner Networks",
"doi": null,
"abstractUrl": "/journal/tc/2013/09/ttc2013091684/13rRUNvgz8T",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/12/ttp2013122997",
"title": "A Minimum Volume Covering Approach with a Set of Ellipsoids",
"doi": null,
"abstractUrl": "/journal/tp/2013/12/ttp2013122997/13rRUwbs1TJ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/1997/10/l1036",
"title": "Honeycomb Networks: Topological Properties and Communication Algorithms",
"doi": null,
"abstractUrl": "/journal/td/1997/10/l1036/13rRUxBa5bq",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyKa5Tk",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBO3K2L",
"doi": "10.1109/ICME.2008.4607747",
"title": "Hierarchical mesh decomposition and motion tracking for Time-Varying-Meshes",
"normalizedTitle": "Hierarchical mesh decomposition and motion tracking for Time-Varying-Meshes",
"abstract": "This paper proposes a system for automatic segmentation and motion tracking of time-varying-meshes (TVM). Our approach is based on skeleton-based hierarchical mesh decomposition by distance calculation. The properties of the human skeleton structure are used to define the decomposition of each TVM frame. The proposed framework is a recursive system that iterates between automatic hierarchical decomposition on minimum distance satisfaction and skeleton realignment. This is done to achieve a stable segmentation and a refined skeleton. By utilizing color information, ill-defined meshes can be successfully segmented. Results show an average disparity of 1.39% of total surface area across all segmented parts, which indicate stability of the system. In addition, motion tracking is successfully performed through the use of refined skeletons.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a system for automatic segmentation and motion tracking of time-varying-meshes (TVM). Our approach is based on skeleton-based hierarchical mesh decomposition by distance calculation. The properties of the human skeleton structure are used to define the decomposition of each TVM frame. The proposed framework is a recursive system that iterates between automatic hierarchical decomposition on minimum distance satisfaction and skeleton realignment. This is done to achieve a stable segmentation and a refined skeleton. By utilizing color information, ill-defined meshes can be successfully segmented. Results show an average disparity of 1.39% of total surface area across all segmented parts, which indicate stability of the system. In addition, motion tracking is successfully performed through the use of refined skeletons.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a system for automatic segmentation and motion tracking of time-varying-meshes (TVM). Our approach is based on skeleton-based hierarchical mesh decomposition by distance calculation. The properties of the human skeleton structure are used to define the decomposition of each TVM frame. The proposed framework is a recursive system that iterates between automatic hierarchical decomposition on minimum distance satisfaction and skeleton realignment. This is done to achieve a stable segmentation and a refined skeleton. By utilizing color information, ill-defined meshes can be successfully segmented. Results show an average disparity of 1.39% of total surface area across all segmented parts, which indicate stability of the system. In addition, motion tracking is successfully performed through the use of refined skeletons.",
"fno": "04607747",
"keywords": [
"Image Colour Analysis",
"Image Motion Analysis",
"Image Segmentation",
"Iterative Methods",
"Motion Tracking",
"Time Varying Meshes",
"Automatic Segmentation",
"Skeleton Based Hierarchical Mesh Decomposition",
"Recursive System",
"Color Information",
"Skeleton",
"Motion Segmentation",
"Tracking",
"Distance Measurement",
"Joints",
"Kinetic Theory",
"Three Dimensional Displays",
"Time Varying Meshes",
"Segmentation"
],
"authors": [
{
"affiliation": "Dept of Electronic Engineering, The University of Tokyo, Japan",
"fullName": "Ning Sung Lee",
"givenName": null,
"surname": "Ning Sung Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept of Information and Communication Engineering, Japan",
"fullName": "Toshihiko Yamasaki",
"givenName": null,
"surname": "Toshihiko Yamasaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept of Information and Communication Engineering, Japan",
"fullName": "Kiyoharu Aizawa",
"givenName": null,
"surname": "Kiyoharu Aizawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-06-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1945-7871",
"isbn": "978-1-4244-2570-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04607746",
"articleId": "12OmNzQzqhj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04607748",
"articleId": "12OmNqFrGxF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iih-msp/2007/2994/1/29940353",
"title": "WYSIWYG: Mesh Decomposition for Static Models",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2007/29940353/12OmNAolGZk",
"parentPublication": {
"id": "iih-msp/2007/2994/1",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118233",
"title": "Morphological skeleton and shape decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118233/12OmNB06l8L",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a371",
"title": "An Adaptive Hierarchical Approach to the Extraction of High Resolution Medial Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a371/12OmNqAU6yz",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761011",
"title": "Fast and precise kinematic skeleton extraction of 3D dynamic meshes",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761011/12OmNqBbHPx",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdee/2010/4332/0/4332a354",
"title": "Line-Skeleton Extraction of 3D Meshes Based on Geometry Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cdee/2010/4332a354/12OmNscxj8a",
"parentPublication": {
"id": "proceedings/cdee/2010/4332/0",
"title": "Cryptography, and Network Security, Data Mining and Knowledge Discovery, E-Commerce and Its Applications, and Embedded Systems, IACIS International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04285074",
"title": "Fast and Robust Motion Tracking for Time-Varying Mesh Featuring Reeb-Graph-Based Skeleton Fitting and its Application to Motion Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04285074/12OmNx19jWi",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2014/4677/0/4677a130",
"title": "Automatic Generation of Skeleton Animation from 3D Human Mesh Model",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2014/4677a130/12OmNzayNus",
"parentPublication": {
"id": "proceedings/cw/2014/4677/0",
"title": "2014 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2001/11/i1296",
"title": "Hierarchical Decomposition of Multiscale Skeletons",
"doi": null,
"abstractUrl": "/journal/tp/2001/11/i1296/13rRUILc8g5",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a240",
"title": "Action Recognition Based on Fusion Skeleton of Two Kinect Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a240/1p1gnv7TMI0",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413189",
"title": "Vertex Feature Encoding and Hierarchical Temporal Modeling in a Spatio-Temporal Graph Convolutional Network for Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413189/1tmirrgQbBu",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx4gUtP",
"title": "2017 30th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"acronym": "sibgrapi",
"groupId": "1000131",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxWcHfu",
"doi": "10.1109/SIBGRAPI.2017.12",
"title": "Repairing Non-Manifold Boundaries of Segmented Simplicial Meshes",
"normalizedTitle": "Repairing Non-Manifold Boundaries of Segmented Simplicial Meshes",
"abstract": "A digital image may contain objects that can be made up of multiple regions concerning different material properties, physical or chemical attributes. Thus, segmented simplicial meshes with non-manifold boundaries are generated to represent the partitioned regions. We focus on repairing non-manifold boundaries. Current methods modify the topology, geometry or both, using their own data structures. The problem of modifying the topology is that if the mesh has to be post-processed, for instance with the Delaunay refinement, the mesh becomes unsuitable. In this paper, we propose alternatives to repair non-manifold boundaries of segmented simplicial meshes, among them is the Delaunay based one, we use common data structures and only consider 2 and 3 dimensions. We developed algorithms for this purpose, composed of the following tools: relabeling, point insertion and simulated annealing. These algorithms are applied depending on the targeted contexts, if we want to speed the process, keep as possible the original segmented mesh or keep the number of elements in the mesh.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A digital image may contain objects that can be made up of multiple regions concerning different material properties, physical or chemical attributes. Thus, segmented simplicial meshes with non-manifold boundaries are generated to represent the partitioned regions. We focus on repairing non-manifold boundaries. Current methods modify the topology, geometry or both, using their own data structures. The problem of modifying the topology is that if the mesh has to be post-processed, for instance with the Delaunay refinement, the mesh becomes unsuitable. In this paper, we propose alternatives to repair non-manifold boundaries of segmented simplicial meshes, among them is the Delaunay based one, we use common data structures and only consider 2 and 3 dimensions. We developed algorithms for this purpose, composed of the following tools: relabeling, point insertion and simulated annealing. These algorithms are applied depending on the targeted contexts, if we want to speed the process, keep as possible the original segmented mesh or keep the number of elements in the mesh.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A digital image may contain objects that can be made up of multiple regions concerning different material properties, physical or chemical attributes. Thus, segmented simplicial meshes with non-manifold boundaries are generated to represent the partitioned regions. We focus on repairing non-manifold boundaries. Current methods modify the topology, geometry or both, using their own data structures. The problem of modifying the topology is that if the mesh has to be post-processed, for instance with the Delaunay refinement, the mesh becomes unsuitable. In this paper, we propose alternatives to repair non-manifold boundaries of segmented simplicial meshes, among them is the Delaunay based one, we use common data structures and only consider 2 and 3 dimensions. We developed algorithms for this purpose, composed of the following tools: relabeling, point insertion and simulated annealing. These algorithms are applied depending on the targeted contexts, if we want to speed the process, keep as possible the original segmented mesh or keep the number of elements in the mesh.",
"fno": "2219a039",
"keywords": [
"Data Structures",
"Geometry",
"Image Segmentation",
"Mesh Generation",
"Simulated Annealing",
"Segmented Simplicial Meshes",
"Physical Attributes",
"Chemical Attributes",
"Partitioned Regions",
"Material Properties",
"Nonmanifold Boundaries Repair",
"Original Segmented Mesh",
"Digital Image",
"Data Structures",
"Delaunay Refinement",
"Point Insertion",
"Simulated Annealing",
"Manifolds",
"Three Dimensional Displays",
"Image Segmentation",
"Data Structures",
"Maintenance Engineering",
"Simulated Annealing",
"Cavity Resonators",
"Computational Geometry",
"Computer Graphics",
"Manifold"
],
"authors": [
{
"affiliation": "Univ. Catolica San Pablo, Arequipa, Peru",
"fullName": "Tony Liedyn Choque Ramos",
"givenName": "Tony Liedyn",
"surname": "Choque Ramos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. Catolica San Pablo, Arequipa, Peru",
"fullName": "Alex Jesus Cuadros Vargas",
"givenName": "Alex Jesus",
"surname": "Cuadros Vargas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sibgrapi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "39-46",
"year": "2017",
"issn": "2377-5416",
"isbn": "978-1-5386-2219-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2219a031",
"articleId": "12OmNy6qfJ2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2219a047",
"articleId": "12OmNBPc8uP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fcst/2010/7779/0/05575782",
"title": "The Geometric and Electrostatic Properties of Binding Cavities and Their Usage in Protein-Ligand Docking",
"doi": null,
"abstractUrl": "/proceedings-article/fcst/2010/05575782/12OmNBgz4Ax",
"parentPublication": {
"id": "proceedings/fcst/2010/7779/0",
"title": "2010 Fifth International Conference on Frontier of Computer Science and Technology (FCST 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477650",
"title": "Automatic 3D reconstruction of manifold meshes via delaunay triangulation and mesh sweeping",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477650/12OmNwEJ0RT",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-euc/2013/5088/0/06832124",
"title": "A New Algorithm for Repairing Non-manifold Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-euc/2013/06832124/12OmNx965wy",
"parentPublication": {
"id": "proceedings/hpcc-euc/2013/5088/0",
"title": "2013 IEEE International Conference on High Performance Computing and Communications (HPCC) & 2013 IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660053",
"title": "Reconstructing Manifold and Non-Manifold Surfaces from Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660053/12OmNxbmSzt",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720354",
"title": "Tuning Manifold Harmonics Filters",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720354/12OmNzBOhIQ",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/05/ttp2008050796",
"title": "Riemannian Manifold Learning",
"doi": null,
"abstractUrl": "/journal/tp/2008/05/ttp2008050796/13rRUEgarkz",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1255",
"title": "Manifold Learning Benefits GANs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1255/1H1jst5lvJm",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047345",
"title": "Medical Image Registration Based on Moving Manifold Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047345/1iC6B00vmTe",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0",
"title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413010",
"title": "On-manifold Adversarial Data Augmentation Improves Uncertainty Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413010/1tmj0SEORmE",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09540267",
"title": "Manifold-Constrained Geometric Optimization via Local Parameterizations",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09540267/1wWCiSASJB6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNykCcdo",
"title": "2018 IEEE Pacific Visualization Symposium (PacificVis)",
"acronym": "pacificvis",
"groupId": "1001657",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzG4gup",
"doi": "10.1109/PacificVis.2018.00010",
"title": "Optimal Algorithms for Compact Linear Layouts",
"normalizedTitle": "Optimal Algorithms for Compact Linear Layouts",
"abstract": "Linear layouts are a simple and natural way to draw a graph: all vertices are placed on a single line and edges are drawn as arcs between the vertices. Despite its simplicity, a linear layout can be a very meaningful visualization if there is a particular order defined on the vertices. Common examples of such ordered - and often also directed - graphs are event sequences and processes. A main drawback of linear layouts are the usually (very) large aspect ratios of the resulting drawings, which prevent users from obtaining a good overview of the whole graph. In this paper we present a novel and versatile algorithm to optimally fold a linear layout of a graph such that it can be drawn nicely in a specified aspect ratio, while still clearly communicating the linearity of the layout. Our algorithm allows vertices to be drawn as blocks or rectangles of specified sizes to incorporate different drawing styles, label sizes, and even recursive structures. For reasonably-sized drawings the folded layout can be computed interactively. We demonstrate the applicability of our algorithm on graphs that represent process trees, a particular type of process model. Our algorithm arguably produces much more readable layouts than existing methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Linear layouts are a simple and natural way to draw a graph: all vertices are placed on a single line and edges are drawn as arcs between the vertices. Despite its simplicity, a linear layout can be a very meaningful visualization if there is a particular order defined on the vertices. Common examples of such ordered - and often also directed - graphs are event sequences and processes. A main drawback of linear layouts are the usually (very) large aspect ratios of the resulting drawings, which prevent users from obtaining a good overview of the whole graph. In this paper we present a novel and versatile algorithm to optimally fold a linear layout of a graph such that it can be drawn nicely in a specified aspect ratio, while still clearly communicating the linearity of the layout. Our algorithm allows vertices to be drawn as blocks or rectangles of specified sizes to incorporate different drawing styles, label sizes, and even recursive structures. For reasonably-sized drawings the folded layout can be computed interactively. We demonstrate the applicability of our algorithm on graphs that represent process trees, a particular type of process model. Our algorithm arguably produces much more readable layouts than existing methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Linear layouts are a simple and natural way to draw a graph: all vertices are placed on a single line and edges are drawn as arcs between the vertices. Despite its simplicity, a linear layout can be a very meaningful visualization if there is a particular order defined on the vertices. Common examples of such ordered - and often also directed - graphs are event sequences and processes. A main drawback of linear layouts are the usually (very) large aspect ratios of the resulting drawings, which prevent users from obtaining a good overview of the whole graph. In this paper we present a novel and versatile algorithm to optimally fold a linear layout of a graph such that it can be drawn nicely in a specified aspect ratio, while still clearly communicating the linearity of the layout. Our algorithm allows vertices to be drawn as blocks or rectangles of specified sizes to incorporate different drawing styles, label sizes, and even recursive structures. For reasonably-sized drawings the folded layout can be computed interactively. We demonstrate the applicability of our algorithm on graphs that represent process trees, a particular type of process model. Our algorithm arguably produces much more readable layouts than existing methods.",
"fno": "142401a001",
"keywords": [
"Graph Theory",
"Trees Mathematics",
"Optimal Algorithms",
"Compact Linear Layouts",
"Vertices",
"Graphs",
"Readable Layouts",
"Specified Aspect Ratio",
"Drawing Styles",
"Label Sizes",
"Recursive Structures",
"Folded Layout",
"Reasonably Sized Drawings",
"Layout",
"Data Visualization",
"Strips",
"Approximation Algorithms",
"Shape",
"Business",
"Connectors",
"Graph Network Data",
"Geometry Based Techniques",
"Linear Layouts",
"Folding"
],
"authors": [
{
"affiliation": null,
"fullName": "Willem Sonke",
"givenName": "Willem",
"surname": "Sonke",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kevin Verbeek",
"givenName": "Kevin",
"surname": "Verbeek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wouter Meulemans",
"givenName": "Wouter",
"surname": "Meulemans",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Eric Verbeek",
"givenName": "Eric",
"surname": "Verbeek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bettina Speckmann",
"givenName": "Bettina",
"surname": "Speckmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pacificvis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-04-01T00:00:00",
"pubType": "proceedings",
"pages": "1-10",
"year": "2018",
"issn": "2165-8773",
"isbn": "978-1-5386-1424-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "142401z018",
"articleId": "12OmNzTYC0O",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "142401a011",
"articleId": "12OmNArbG31",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccd/1990/2079/0/00130276",
"title": "A linear time algorithm for optimal CMOS functional cell layouts",
"doi": null,
"abstractUrl": "/proceedings-article/iccd/1990/00130276/12OmNANBZpL",
"parentPublication": {
"id": "proceedings/iccd/1990/2079/0",
"title": "Proceedings., 1990 IEEE International Conference on Computer Design: VLSI in Computers and Processors",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2014/2555/0/06816686",
"title": "Optimal hierarchical layouts for cache-oblivious search trees",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2014/06816686/12OmNCgrDbD",
"parentPublication": {
"id": "proceedings/icde/2014/2555/0",
"title": "2014 IEEE 30th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/focs/1980/5428/0/542800270",
"title": "Area-efficient graph layouts",
"doi": null,
"abstractUrl": "/proceedings-article/focs/1980/542800270/12OmNqIQSlj",
"parentPublication": {
"id": "proceedings/focs/1980/5428/0",
"title": "21st Annual Symposium on Foundations of Computer Science (sfcs 1980)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/host/2017/3929/0/07951833",
"title": "On designing optimal camouflaged layouts",
"doi": null,
"abstractUrl": "/proceedings-article/host/2017/07951833/12OmNy4IF0P",
"parentPublication": {
"id": "proceedings/host/2017/3929/0",
"title": "2017 IEEE International Symposium on Hardware Oriented Security and Trust (HOST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/jcdl/2005/876/0/04118540",
"title": "Meaningful presentations of photo libraries: rationale and applications of bi-level radial quantum layouts",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2005/04118540/12OmNyvGyig",
"parentPublication": {
"id": "proceedings/jcdl/2005/876/0",
"title": "Proceedings of the 5th ACM/IEEE Joint Conference on Digital Libraries",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009060961",
"title": "A Comparison of User-Generated and Automatic Graph Layouts",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009060961/13rRUwbaqLs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1994/05/t0581",
"title": "Wiring Knock-Knee Layouts: A Global Approach",
"doi": null,
"abstractUrl": "/journal/tc/1994/05/t0581/13rRUwgyOic",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/1999/02/l0115",
"title": "A Linear Algebra Framework for Automatic Determination of Optimal Data Layouts",
"doi": null,
"abstractUrl": "/journal/td/1999/02/l0115/13rRUxBJhmm",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/07/08948239",
"title": "LayoutGAN: Synthesizing Graphic Layouts With Vector-Wireframe Adversarial Networks",
"doi": null,
"abstractUrl": "/journal/tp/2021/07/08948239/1geNB7KG1eE",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900p5804",
"title": "Magic Layouts: Structural Prior for Component Detection in User Interface Designs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900p5804/1yeLwWp0lMY",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrIJqwx",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"acronym": "icdh",
"groupId": "1802037",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzaQozi",
"doi": "10.1109/ICDH.2014.64",
"title": "Semantic Segmentation and Labeling of 3D Garments",
"normalizedTitle": "Semantic Segmentation and Labeling of 3D Garments",
"abstract": "As large collections of 3D garments continue to grow, analyzing and exploring shape variations is significant but challenging. In this paper, we propose a semi-supervised learning method for semantic segmentation and labeling of 3D garments. The key idea in this work is to address the data challenge for 3D garment analysis using semi-supervised learning method which can label parts in various 3D garments. We first develop an objective function based on Conditional Random Field (CRF) model to learn the prior knowledge of garment components from a set of training examples. Then, we segment 3D garments into five component prototypes related to top, bottom, sleeve, accessory and one-piece, respectively. And we modify the Joint Boost to automatically cluster the segmented components without requiring manual parameter tuning. The purpose of our method is to relieve the manual segmentation and labeling of components in 3D garment collections. The experimental results demonstrate our method is effective and comparable to human work.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As large collections of 3D garments continue to grow, analyzing and exploring shape variations is significant but challenging. In this paper, we propose a semi-supervised learning method for semantic segmentation and labeling of 3D garments. The key idea in this work is to address the data challenge for 3D garment analysis using semi-supervised learning method which can label parts in various 3D garments. We first develop an objective function based on Conditional Random Field (CRF) model to learn the prior knowledge of garment components from a set of training examples. Then, we segment 3D garments into five component prototypes related to top, bottom, sleeve, accessory and one-piece, respectively. And we modify the Joint Boost to automatically cluster the segmented components without requiring manual parameter tuning. The purpose of our method is to relieve the manual segmentation and labeling of components in 3D garment collections. The experimental results demonstrate our method is effective and comparable to human work.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As large collections of 3D garments continue to grow, analyzing and exploring shape variations is significant but challenging. In this paper, we propose a semi-supervised learning method for semantic segmentation and labeling of 3D garments. The key idea in this work is to address the data challenge for 3D garment analysis using semi-supervised learning method which can label parts in various 3D garments. We first develop an objective function based on Conditional Random Field (CRF) model to learn the prior knowledge of garment components from a set of training examples. Then, we segment 3D garments into five component prototypes related to top, bottom, sleeve, accessory and one-piece, respectively. And we modify the Joint Boost to automatically cluster the segmented components without requiring manual parameter tuning. The purpose of our method is to relieve the manual segmentation and labeling of components in 3D garment collections. The experimental results demonstrate our method is effective and comparable to human work.",
"fno": "4284a299",
"keywords": [
"Clothing",
"Shape",
"Three Dimensional Displays",
"Training",
"Semantics",
"Semisupervised Learning",
"Labeling",
"Shape Clustering",
"3 D Garments",
"Mesh Segmentation",
"Shape Analysis",
"Semi Supervised Learning"
],
"authors": [
{
"affiliation": null,
"fullName": "Li Liu",
"givenName": "Li",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ruomei Wang",
"givenName": "Ruomei",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Fan Zhou",
"givenName": "Fan",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhuo Su",
"givenName": "Zhuo",
"surname": "Su",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaodong Fu",
"givenName": "Xiaodong",
"surname": "Fu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdh",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-11-01T00:00:00",
"pubType": "proceedings",
"pages": "299-304",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4284-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4284a293",
"articleId": "12OmNBEGYLJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4284a305",
"articleId": "12OmNxEBz68",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2016/2312/0/2312a323",
"title": "Human 3D Garment Modeling Method Based on Surface Modeling Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a323/12OmNAoUTvY",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2015/6683/0/6683b068",
"title": "Semantic Instance Labeling Leveraging Hierarchical Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2015/6683b068/12OmNBTawwj",
"parentPublication": {
"id": "proceedings/wacv/2015/6683/0",
"title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2021/3176/0/09667070",
"title": "UV-based reconstruction of 3D garments from a single RGB image",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09667070/1A6BtIpyi88",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scset/2022/7876/0/787600a204",
"title": "Research on Improving Garment Fit through CLO 3D Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/scset/2022/787600a204/1ANM2D8v5MA",
"parentPublication": {
"id": "proceedings/scset/2022/7876/0",
"title": "2022 International Seminar on Computer Science and Engineering Technology (SCSET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200d304",
"title": "GarmentNets: Category-Level Pose Estimation for Garments via Canonical Space Shape Completion",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d304/1BmILRj6ydO",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600i130",
"title": "SNUG: Self-Supervised Neural Dynamic Garments",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600i130/1H1mT62lup2",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300f419",
"title": "Multi-Garment Net: Learning to Dress 3D People From Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300f419/1hVlwZpXtZK",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h021",
"title": "Learning to Transfer Texture From Clothing Images to 3D Humans",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h021/1m3odfHuXF6",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100b416",
"title": "DeepDraper: Fast and Accurate 3D Garment Draping over a 3D Human Body",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100b416/1yNirBDl984",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1758",
"title": "Self-Supervised Collision Handling via Generative 3D Garment Models for Virtual Try-On",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1758/1yeLegPnV2E",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmGuxEHMIg",
"doi": "10.1109/ICCV48922.2021.01362",
"title": "Learning Generative Models of Textured 3D Meshes from Real-World Images",
"normalizedTitle": "Learning Generative Models of Textured 3D Meshes from Real-World Images",
"abstract": "Recent advances in differentiable rendering have sparked an interest in learning generative models of textured 3D meshes from image collections. These models natively disentangle pose and appearance, enable downstream applications in computer graphics, and improve the ability of generative models to understand the concept of image formation. Although there has been prior work on learning such models from collections of 2D images, these approaches require a delicate pose estimation step that exploits annotated keypoints, thereby restricting their applicability to a few specific datasets. In this work, we propose a GAN framework for generating textured triangle meshes without relying on such annotations. We show that the performance of our approach is on par with prior work that relies on ground-truth keypoints, and more importantly, we demonstrate the generality of our method by setting new baselines on a larger set of categories from ImageNet–for which keypoints are not available–without any class-specific hyperparameter tuning. We release our code at https://github.com/dariopavllo/textured-3d-gan",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent advances in differentiable rendering have sparked an interest in learning generative models of textured 3D meshes from image collections. These models natively disentangle pose and appearance, enable downstream applications in computer graphics, and improve the ability of generative models to understand the concept of image formation. Although there has been prior work on learning such models from collections of 2D images, these approaches require a delicate pose estimation step that exploits annotated keypoints, thereby restricting their applicability to a few specific datasets. In this work, we propose a GAN framework for generating textured triangle meshes without relying on such annotations. We show that the performance of our approach is on par with prior work that relies on ground-truth keypoints, and more importantly, we demonstrate the generality of our method by setting new baselines on a larger set of categories from ImageNet–for which keypoints are not available–without any class-specific hyperparameter tuning. We release our code at https://github.com/dariopavllo/textured-3d-gan",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent advances in differentiable rendering have sparked an interest in learning generative models of textured 3D meshes from image collections. These models natively disentangle pose and appearance, enable downstream applications in computer graphics, and improve the ability of generative models to understand the concept of image formation. Although there has been prior work on learning such models from collections of 2D images, these approaches require a delicate pose estimation step that exploits annotated keypoints, thereby restricting their applicability to a few specific datasets. In this work, we propose a GAN framework for generating textured triangle meshes without relying on such annotations. We show that the performance of our approach is on par with prior work that relies on ground-truth keypoints, and more importantly, we demonstrate the generality of our method by setting new baselines on a larger set of categories from ImageNet–for which keypoints are not available–without any class-specific hyperparameter tuning. We release our code at https://github.com/dariopavllo/textured-3d-gan",
"fno": "281200n3859",
"keywords": [
"Solid Modeling",
"Computer Vision",
"Three Dimensional Displays",
"Codes",
"Annotations",
"Computational Modeling",
"Semantics",
"Image And Video Synthesis",
"3 D From A Single Image And Shape From X",
"Neural Generative Models"
],
"authors": [
{
"affiliation": "ETH Zurich,Department of Computer Science",
"fullName": "Dario Pavllo",
"givenName": "Dario",
"surname": "Pavllo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zurich,Department of Computer Science",
"fullName": "Jonas Kohler",
"givenName": "Jonas",
"surname": "Kohler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zurich,Department of Computer Science",
"fullName": "Thomas Hofmann",
"givenName": "Thomas",
"surname": "Hofmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zurich,Department of Computer Science",
"fullName": "Aurelien Lucchi",
"givenName": "Aurelien",
"surname": "Lucchi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "13859-13869",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200n3849",
"articleId": "1BmGbF5vKOQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200n3870",
"articleId": "1BmGhEwLQL6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pbg/2005/20/0/01500326",
"title": "Conversion of point-sampled models to textured meshes",
"doi": null,
"abstractUrl": "/proceedings-article/pbg/2005/01500326/12OmNxzMnLl",
"parentPublication": {
"id": "proceedings/pbg/2005/20/0",
"title": "Point-Based Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546154",
"title": "Generation Textured Contact Lenses Iris Images Based on 4DCycle-GAN",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546154/17D45VtKitg",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1037",
"title": "Keypoint Communities",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1037/1BmKw9yg61G",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a342",
"title": "3inGAN: Learning a 3D Generative Model from Images of a Self-similar Scene",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a342/1KYsw4zjeUw",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a592",
"title": "PointInverter: Point Cloud Reconstruction and Editing via a Generative Model with Shape Priors",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a592/1L8qlBdr5q8",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300f447",
"title": "ApolloCar3D: A Large 3D Car Instance Understanding Benchmark for Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300f447/1gyrg03LSko",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800g468",
"title": "Learning to Dress 3D People in Generative Clothing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800g468/1m3nwUHFD68",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h495",
"title": "Leveraging 2D Data to Learn Textured 3D Mesh Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h495/1m3oaI2b0di",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h917",
"title": "Regularizing Generative Adversarial Networks under Limited Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h917/1yeI9dhstfq",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h868",
"title": "Hijack-GAN: Unintended-Use of Pretrained, Black-Box GANs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h868/1yeKagHNC7e",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1i5mkDyiIUg",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1i5mrYRaXwk",
"doi": "10.1109/ICCVW.2019.00496",
"title": "Auto-Encoding Meshes of any Topology with the Current-Splatting and Exponentiation Layers",
"normalizedTitle": "Auto-Encoding Meshes of any Topology with the Current-Splatting and Exponentiation Layers",
"abstract": "Deep learning has met key applications in image computing, but still lacks processing paradigms for meshes, i.e. collections of elementary geometrical parts such as points, segments or triangles. Meshes are both a powerful representation for geometrical objects, and a challenge for network architectures because of their inherent irregular structure. This work contributes to adapt classical deep learning paradigms to this particular type of data in three ways. First, we introduce the current-splatting layer which embeds meshes in a metric space, allowing the downstream network to process them without any assumption on their topology: they may be composed of varied numbers of elements or connected components, contain holes, or bear high levels of geometrical noise. Second, we adapt to meshes the exponentiation layer which, from an upstream image array, generates shapes with a diffeomorphic control over their topology. Third, we take advantage of those layers to devise a variational auto-encoding architecture, which we interpret as a generative statistical model that learns adapted low-dimensional representations for mesh data sets. An explicit norm-control layer ensures the correspondence between the latent-space Euclidean metric and the shape-space log-Euclidean one. We illustrate this method on simulated and real data sets, and show the practical relevance of the learned representation for visualization, classification and mesh synthesis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Deep learning has met key applications in image computing, but still lacks processing paradigms for meshes, i.e. collections of elementary geometrical parts such as points, segments or triangles. Meshes are both a powerful representation for geometrical objects, and a challenge for network architectures because of their inherent irregular structure. This work contributes to adapt classical deep learning paradigms to this particular type of data in three ways. First, we introduce the current-splatting layer which embeds meshes in a metric space, allowing the downstream network to process them without any assumption on their topology: they may be composed of varied numbers of elements or connected components, contain holes, or bear high levels of geometrical noise. Second, we adapt to meshes the exponentiation layer which, from an upstream image array, generates shapes with a diffeomorphic control over their topology. Third, we take advantage of those layers to devise a variational auto-encoding architecture, which we interpret as a generative statistical model that learns adapted low-dimensional representations for mesh data sets. An explicit norm-control layer ensures the correspondence between the latent-space Euclidean metric and the shape-space log-Euclidean one. We illustrate this method on simulated and real data sets, and show the practical relevance of the learned representation for visualization, classification and mesh synthesis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Deep learning has met key applications in image computing, but still lacks processing paradigms for meshes, i.e. collections of elementary geometrical parts such as points, segments or triangles. Meshes are both a powerful representation for geometrical objects, and a challenge for network architectures because of their inherent irregular structure. This work contributes to adapt classical deep learning paradigms to this particular type of data in three ways. First, we introduce the current-splatting layer which embeds meshes in a metric space, allowing the downstream network to process them without any assumption on their topology: they may be composed of varied numbers of elements or connected components, contain holes, or bear high levels of geometrical noise. Second, we adapt to meshes the exponentiation layer which, from an upstream image array, generates shapes with a diffeomorphic control over their topology. Third, we take advantage of those layers to devise a variational auto-encoding architecture, which we interpret as a generative statistical model that learns adapted low-dimensional representations for mesh data sets. An explicit norm-control layer ensures the correspondence between the latent-space Euclidean metric and the shape-space log-Euclidean one. We illustrate this method on simulated and real data sets, and show the practical relevance of the learned representation for visualization, classification and mesh synthesis.",
"fno": "09022413",
"keywords": [
"Computational Geometry",
"Data Structures",
"Graph Theory",
"Image Representation",
"Learning Artificial Intelligence",
"Exponentiation Layer",
"Upstream Image Array",
"Variational Auto Encoding Architecture",
"Mesh Data Sets",
"Explicit Norm Control Layer",
"Shape Space Log Euclidean",
"Auto Encoding Meshes",
"Image Computing",
"Elementary Geometrical Parts",
"Geometrical Objects",
"Network Architectures",
"Inherent Irregular Structure",
"Deep Learning",
"Geometrical Noise",
"Current Splatting Layer",
"Generative Statistical Model",
"Low Dimensional Representations",
"Latent Space Euclidean Metric",
"Topology",
"Shape",
"Kernel",
"Computer Architecture",
"Measurement",
"Three Dimensional Displays",
"Feature Extraction",
"Auto Encoders",
"Morphometry",
"Currents",
"Exponentiation",
"Shape Analysis",
"Topology"
],
"authors": [
{
"affiliation": "Brain and Spine Institute, France",
"fullName": "Alexandre Bône",
"givenName": "Alexandre",
"surname": "Bône",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Brain and Spine Institute, France",
"fullName": "Olivier Colliot",
"givenName": "Olivier",
"surname": "Colliot",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Brain and Spine Institute, France",
"fullName": "Stanley Durrleman",
"givenName": "Stanley",
"surname": "Durrleman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "4014-4023",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-5023-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09022240",
"articleId": "1i5mofbEPzG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09022017",
"articleId": "1i5mMluVUje",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdh/2016/4400/0/4400a296",
"title": "Detection of Imbalanced Vertices in 3D Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2016/4400a296/12OmNArKSkh",
"parentPublication": {
"id": "proceedings/icdh/2016/4400/0",
"title": "2016 6th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmip/2017/5954/0/5954a315",
"title": "A Polygon Meshes Compression Algorithm Based on ASCII and Interval Coding",
"doi": null,
"abstractUrl": "/proceedings-article/icmip/2017/5954a315/12OmNCbU395",
"parentPublication": {
"id": "proceedings/icmip/2017/5954/0",
"title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012141",
"title": "Topological synchronization mechanism for robust watermarking on 3D semi-regular meshes",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012141/12OmNvIxeYK",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a318",
"title": "Variational Building Modeling from Urban MVS Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a318/12OmNvRU0p0",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2016/4571/0/4571a247",
"title": "Reconstruction of High Resolution 3D Meshes of Lung Geometry from HRCT Contours",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a247/12OmNxG1yNS",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2018/1424/0/142401a235",
"title": "An Evolutionary Signature for Animated Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2018/142401a235/12OmNxGAL6M",
"parentPublication": {
"id": "proceedings/pacificvis/2018/1424/0",
"title": "2018 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2018/7568/0/08705096",
"title": "An effective method for hole filling in 3D triangular meshes",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2018/08705096/19RSKn8djJm",
"parentPublication": {
"id": "proceedings/isspit/2018/7568/0",
"title": "2018 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600n3482",
"title": "Text2Mesh: Text-Driven Neural Stylization for Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3482/1H1hBnpgbAI",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09159927",
"title": "Learning on 3D Meshes With Laplacian Encoding and Pooling",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09159927/1m3m77L2v3a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i609",
"title": "DualConvMesh-Net: Joint Geodesic and Euclidean Convolutions on 3D Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i609/1m3nzTemaGs",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1lPGXn8hEiI",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1lPHfRRaKM8",
"doi": "10.1109/CVPRW50498.2020.00182",
"title": "Semi-supervised 3D Face Representation Learning from Unconstrained Photo Collections",
"normalizedTitle": "Semi-supervised 3D Face Representation Learning from Unconstrained Photo Collections",
"abstract": "Recovering 3D geometry shape, albedo, and lighting from a single image is a typical ill-posed problem. To address this challenging problem, we propose to utilize the joint constraints from unconstrained photo collections of one person to recover his or her identity shape and albedo. Unconstrained photo collections include one's photos captured under different times, backgrounds, and expressions, e.g., photos posted on Instagram. We train our model in a semi-supervised manner with adversarial loss to exploit large amounts of unconstrained facial images. A novel center loss is introduced to make sure that facial images from the same subject have the same identity shape and albedo. Besides, our proposed model disentangles identity, expression, pose, and lighting representations, which improves the overall reconstruction performance and facilitates facial editing applications, e.g., expression transfer. Comprehensive experiments demonstrate that our model produces high-quality reconstruction compared to state-of-the-art methods and is robust to various expression, pose, and lighting conditions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recovering 3D geometry shape, albedo, and lighting from a single image is a typical ill-posed problem. To address this challenging problem, we propose to utilize the joint constraints from unconstrained photo collections of one person to recover his or her identity shape and albedo. Unconstrained photo collections include one's photos captured under different times, backgrounds, and expressions, e.g., photos posted on Instagram. We train our model in a semi-supervised manner with adversarial loss to exploit large amounts of unconstrained facial images. A novel center loss is introduced to make sure that facial images from the same subject have the same identity shape and albedo. Besides, our proposed model disentangles identity, expression, pose, and lighting representations, which improves the overall reconstruction performance and facilitates facial editing applications, e.g., expression transfer. Comprehensive experiments demonstrate that our model produces high-quality reconstruction compared to state-of-the-art methods and is robust to various expression, pose, and lighting conditions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recovering 3D geometry shape, albedo, and lighting from a single image is a typical ill-posed problem. To address this challenging problem, we propose to utilize the joint constraints from unconstrained photo collections of one person to recover his or her identity shape and albedo. Unconstrained photo collections include one's photos captured under different times, backgrounds, and expressions, e.g., photos posted on Instagram. We train our model in a semi-supervised manner with adversarial loss to exploit large amounts of unconstrained facial images. A novel center loss is introduced to make sure that facial images from the same subject have the same identity shape and albedo. Besides, our proposed model disentangles identity, expression, pose, and lighting representations, which improves the overall reconstruction performance and facilitates facial editing applications, e.g., expression transfer. Comprehensive experiments demonstrate that our model produces high-quality reconstruction compared to state-of-the-art methods and is robust to various expression, pose, and lighting conditions.",
"fno": "09151077",
"keywords": [
"Face Recognition",
"Image Reconstruction",
"Image Representation",
"Learning Artificial Intelligence",
"3 D Geometry Shape",
"Unconstrained Photo Collections",
"Unconstrained Facial Images",
"Semisupervised 3 D Face Representation Learning",
"Reconstruction Performance",
"Face",
"Shape",
"Three Dimensional Displays",
"Lighting",
"Image Reconstruction",
"Solid Modeling",
"Decoding"
],
"authors": [
{
"affiliation": "Shanghai Jiao Tong University,Artificial Intelligence Institute",
"fullName": "Zhongpai Gao",
"givenName": "Zhongpai",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China",
"fullName": "Juyong Zhang",
"givenName": "Juyong",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Science and Technology of China",
"fullName": "Yudong Guo",
"givenName": "Yudong",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,Artificial Intelligence Institute",
"fullName": "Chao Ma",
"givenName": "Chao",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,Artificial Intelligence Institute",
"fullName": "Guangtao Zhai",
"givenName": "Guangtao",
"surname": "Zhai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,Artificial Intelligence Institute",
"fullName": "Xiaokang Yang",
"givenName": "Xiaokang",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1426-1435",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9360-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09151079",
"articleId": "1lPH7aeA5BS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09150596",
"articleId": "1lPHyEG8cqA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/07780824",
"title": "Adaptive 3D Face Reconstruction from Unconstrained Photo Collections",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/07780824/12OmNqBtiNO",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032e733",
"title": "Learning Dense Facial Correspondences in Unconstrained Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032e733/12OmNywfKJx",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2016/2305/0/2305a154",
"title": "Compositing Real and Synthetic Images: Using Kinect and Fisheye Camera",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2016/2305a154/12OmNzICEP2",
"parentPublication": {
"id": "proceedings/nicoint/2016/2305/0",
"title": "2016 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/07/ttp2013071674",
"title": "Joint Albedo Estimation and Pose Tracking from Video",
"doi": null,
"abstractUrl": "/journal/tp/2013/07/ttp2013071674/13rRUEgs2Nb",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/11/07776921",
"title": "Adaptive 3D Face Reconstruction from Unconstrained Photo Collections",
"doi": null,
"abstractUrl": "/journal/tp/2017/11/07776921/13rRUxAAT8W",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/6.946E57",
"title": "Enhancing Face Recognition with Self-Supervised 3D Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/6.946E57/1H0KUECAWMU",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/01/08762226",
"title": "On Learning 3D Face Morphable Model from In-the-Wild Images",
"doi": null,
"abstractUrl": "/journal/tp/2021/01/08762226/1bIeDzrgiha",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0152",
"title": "Aggregation via Separation: Boosting Facial Landmark Detector With Semi-Supervised Style Translation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0152/1hVlRmhCM9O",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102812",
"title": "Low-Frequency Guided Self-Supervised Learning For High-Fidelity 3d Face Reconstruction In The Wild",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102812/1kwr0vb7YsM",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1657",
"title": "Normalized Avatar Synthesis Using StyleGAN and Perceptual Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1657/1yeHJbRbxpm",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx2zjwt",
"title": "Proceedings of International Conference on Image Processing",
"acronym": "icip",
"groupId": "1000349",
"volume": "2",
"displayVolume": "2",
"year": "1997",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAmE613",
"doi": "10.1109/ICIP.1997.638752",
"title": "Extension of the collage theorem",
"normalizedTitle": "Extension of the collage theorem",
"abstract": "Fractal image compression using the iterative function system (IFS) is based on the collage theorem proposed by Barnsley et al. (1986). In this conventional method, the errors between the reconstructed image and original image may be greater than the errors between the collage and original image because the collage theorem does not guarantee the former errors to be smaller than the latter errors. This paper proposes an extended collage theorem. An IFS algorithm based on this theorem determines the parameters after iterations of the contraction mappings. An image reconstructed according to the new theorem has higher quality than one based on the existing collage theorem. The reconstructed image can be gotten by fewer iterations than by the conventional IFS.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Fractal image compression using the iterative function system (IFS) is based on the collage theorem proposed by Barnsley et al. (1986). In this conventional method, the errors between the reconstructed image and original image may be greater than the errors between the collage and original image because the collage theorem does not guarantee the former errors to be smaller than the latter errors. This paper proposes an extended collage theorem. An IFS algorithm based on this theorem determines the parameters after iterations of the contraction mappings. An image reconstructed according to the new theorem has higher quality than one based on the existing collage theorem. The reconstructed image can be gotten by fewer iterations than by the conventional IFS.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Fractal image compression using the iterative function system (IFS) is based on the collage theorem proposed by Barnsley et al. (1986). In this conventional method, the errors between the reconstructed image and original image may be greater than the errors between the collage and original image because the collage theorem does not guarantee the former errors to be smaller than the latter errors. This paper proposes an extended collage theorem. An IFS algorithm based on this theorem determines the parameters after iterations of the contraction mappings. An image reconstructed according to the new theorem has higher quality than one based on the existing collage theorem. The reconstructed image can be gotten by fewer iterations than by the conventional IFS.",
"fno": "81832306",
"keywords": [
"Data Compression Collage Theorem Fractal Image Compression Iterative Function System Reconstructed Image Errors Extended Collage Theorem IFS Algorithm Contraction Mappings Quality Iteration"
],
"authors": [
{
"affiliation": "Sch. of Eng., Hokkaido Univ. Sapporo, Japan",
"fullName": "H. Honda",
"givenName": "H.",
"surname": "Honda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Eng., Hokkaido Univ. Sapporo, Japan",
"fullName": "M. Haseyama",
"givenName": "M.",
"surname": "Haseyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Eng., Hokkaido Univ. Sapporo, Japan",
"fullName": "H. Kitajima",
"givenName": "H.",
"surname": "Kitajima",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Eng., Hokkaido Univ. Sapporo, Japan",
"fullName": "S. Matsumoto",
"givenName": "S.",
"surname": "Matsumoto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icip",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1997-10-01T00:00:00",
"pubType": "proceedings",
"pages": "306",
"year": "1997",
"issn": null,
"isbn": "0-8186-8183-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "81832302",
"articleId": "12OmNyRxFqA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00638686",
"articleId": "12OmNB7cjkz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyFCvPo",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqzcvIt",
"doi": "10.1109/ICCV.2013.457",
"title": "Scene Collaging: Analysis and Synthesis of Natural Images with Semantic Layers",
"normalizedTitle": "Scene Collaging: Analysis and Synthesis of Natural Images with Semantic Layers",
"abstract": "To quickly synthesize complex scenes, digital artists often collage together visual elements from multiple sources: for example, mountains from New Zealand behind a Scottish castle with wisps of Saharan sand in front. In this paper, we propose to use a similar process in order to parse a scene. We model a scene as a collage of warped, layered objects sampled from labeled, reference images. Each object is related to the rest by a set of support constraints. Scene parsing is achieved through analysis-by-synthesis. Starting with a dataset of labeled exemplar scenes, we retrieve a dictionary of candidate object segments that match a query image. We then combine elements of this set into a \"scene collage\" that explains the query image. Beyond just assigning object labels to pixels, scene collaging produces a lot more information such as the number of each type of object in the scene, how they support one another, the ordinal depth of each object, and, to some degree, occluded content. We exploit this representation for several applications: image editing, random scene synthesis, and image-to-anaglyph.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To quickly synthesize complex scenes, digital artists often collage together visual elements from multiple sources: for example, mountains from New Zealand behind a Scottish castle with wisps of Saharan sand in front. In this paper, we propose to use a similar process in order to parse a scene. We model a scene as a collage of warped, layered objects sampled from labeled, reference images. Each object is related to the rest by a set of support constraints. Scene parsing is achieved through analysis-by-synthesis. Starting with a dataset of labeled exemplar scenes, we retrieve a dictionary of candidate object segments that match a query image. We then combine elements of this set into a \"scene collage\" that explains the query image. Beyond just assigning object labels to pixels, scene collaging produces a lot more information such as the number of each type of object in the scene, how they support one another, the ordinal depth of each object, and, to some degree, occluded content. We exploit this representation for several applications: image editing, random scene synthesis, and image-to-anaglyph.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To quickly synthesize complex scenes, digital artists often collage together visual elements from multiple sources: for example, mountains from New Zealand behind a Scottish castle with wisps of Saharan sand in front. In this paper, we propose to use a similar process in order to parse a scene. We model a scene as a collage of warped, layered objects sampled from labeled, reference images. Each object is related to the rest by a set of support constraints. Scene parsing is achieved through analysis-by-synthesis. Starting with a dataset of labeled exemplar scenes, we retrieve a dictionary of candidate object segments that match a query image. We then combine elements of this set into a \"scene collage\" that explains the query image. Beyond just assigning object labels to pixels, scene collaging produces a lot more information such as the number of each type of object in the scene, how they support one another, the ordinal depth of each object, and, to some degree, occluded content. We exploit this representation for several applications: image editing, random scene synthesis, and image-to-anaglyph.",
"fno": "2840d048",
"keywords": [
"Image Segmentation",
"Dictionaries",
"Context",
"Visualization",
"Grammar",
"Semantics",
"Buildings"
],
"authors": [
{
"affiliation": null,
"fullName": "Phillip Isola",
"givenName": "Phillip",
"surname": "Isola",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ce Liu",
"givenName": "Ce",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "3048-3055",
"year": "2013",
"issn": "1550-5499",
"isbn": "978-1-4799-2840-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2840d040",
"articleId": "12OmNyTwReN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2840d056",
"articleId": "12OmNzTH0UK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/psivt/2010/4285/0/4285a039",
"title": "Semantic Segmentation and Object Recognition Using Scene-Context Scale",
"doi": null,
"abstractUrl": "/proceedings-article/psivt/2010/4285a039/12OmNBQTJg1",
"parentPublication": {
"id": "proceedings/psivt/2010/4285/0",
"title": "Image and Video Technology, Pacific-Rim Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209c299",
"title": "A Hybrid Holistic/Semantic Approach for Scene Classification",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c299/12OmNro0I0m",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2005/9331/0/01521588",
"title": "Improved semantic region labeling based on scene context",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2005/01521588/12OmNwFzO1I",
"parentPublication": {
"id": "proceedings/icme/2005/9331/0",
"title": "2005 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851e555",
"title": "Natural Language Object Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851e555/12OmNwdL7e3",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460876",
"title": "Segmentation and scene modeling for MIL-based target localization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460876/12OmNxWcHhZ",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d889",
"title": "Optical Flow with Semantic Segmentation and Localized Layers",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d889/12OmNzyGH2R",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08057796",
"title": "Narrative Collage of Image Collections by Scene Graph Recombination",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08057796/13rRUwI5TR6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/01/07115183",
"title": "Human-Machine CRFs for Identifying Bottlenecks in Scene Understanding",
"doi": null,
"abstractUrl": "/journal/tp/2016/01/07115183/13rRUyfKIEr",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c252",
"title": "3D Scene Painting via Semantic Image Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c252/1H1lSPqCX04",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/09/09416811",
"title": "Object-Level Scene Context Prediction",
"doi": null,
"abstractUrl": "/journal/tp/2022/09/09416811/1t8VSUW1DfG",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzzxuy8",
"title": "2013 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNsdo6u8",
"doi": "10.1109/CW.2013.74",
"title": "Designing Narrative Interface with a Function of Narrative Generation",
"normalizedTitle": "Designing Narrative Interface with a Function of Narrative Generation",
"abstract": "This paper will propose a new type of \"narrative interface\" including an automatic narrative generation mechanism based on our integrated narrative generation system and the experimental application systems. For the demonstration of three types of narrative interfaces, we introduce Narrative Forest and KOSERUBE as application systems of the integrated system. In the former, a narrative tree structure generated is corresponded to the visual image of a tree and the generation process is always reflected to the growth of the tree. KOSERUBE automatically generates narratives in the style of a folktale with characters/places/objects. They are respectively equivalent to \"symbolic narrative interface\" and \"realistic narrative interface\". Another significant concept of our narrative interface is \"the fluidity and fixation of a narrative as information content\". We will present several examples of narrative interface dependent on the above conceptual ground in this paper.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper will propose a new type of \"narrative interface\" including an automatic narrative generation mechanism based on our integrated narrative generation system and the experimental application systems. For the demonstration of three types of narrative interfaces, we introduce Narrative Forest and KOSERUBE as application systems of the integrated system. In the former, a narrative tree structure generated is corresponded to the visual image of a tree and the generation process is always reflected to the growth of the tree. KOSERUBE automatically generates narratives in the style of a folktale with characters/places/objects. They are respectively equivalent to \"symbolic narrative interface\" and \"realistic narrative interface\". Another significant concept of our narrative interface is \"the fluidity and fixation of a narrative as information content\". We will present several examples of narrative interface dependent on the above conceptual ground in this paper.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper will propose a new type of \"narrative interface\" including an automatic narrative generation mechanism based on our integrated narrative generation system and the experimental application systems. For the demonstration of three types of narrative interfaces, we introduce Narrative Forest and KOSERUBE as application systems of the integrated system. In the former, a narrative tree structure generated is corresponded to the visual image of a tree and the generation process is always reflected to the growth of the tree. KOSERUBE automatically generates narratives in the style of a folktale with characters/places/objects. They are respectively equivalent to \"symbolic narrative interface\" and \"realistic narrative interface\". Another significant concept of our narrative interface is \"the fluidity and fixation of a narrative as information content\". We will present several examples of narrative interface dependent on the above conceptual ground in this paper.",
"fno": "2246a214",
"keywords": [
"Visualization",
"Dictionaries",
"Vegetation",
"Media",
"Games",
"Natural Languages",
"Motion Segmentation",
"Narrative Forest",
"Narrative Interface",
"Integrated Narrative Generation System",
"KOSERUBE"
],
"authors": [
{
"affiliation": "Fac. of Software & Inf. Sci., Iwate Prefectural Univ., Takizawa, Japan",
"fullName": "Takashi Ogata",
"givenName": "Takashi",
"surname": "Ogata",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Grad. Sch. of Software & Inf. Sci., Iwate Prefectural Univ., Takizawa, Japan",
"fullName": "Jumpei Ono",
"givenName": "Jumpei",
"surname": "Ono",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-10-01T00:00:00",
"pubType": "proceedings",
"pages": "214-221",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2246-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2246a206",
"articleId": "12OmNzcxZ6N",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2246a222",
"articleId": "12OmNvkpl39",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cyberc/2014/6236/0/6236a066",
"title": "Generating Repudiable, Memorizable, and Privacy Preserving Security Questions Using the Propp Theory of Narrative",
"doi": null,
"abstractUrl": "/proceedings-article/cyberc/2014/6236a066/12OmNAZfxM0",
"parentPublication": {
"id": "proceedings/cyberc/2014/6236/0",
"title": "2014 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2017/4846/0/484601a144",
"title": "Network Traversal as an Aid to Plot Analysis and Composition",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2017/484601a144/12OmNC4O4Cs",
"parentPublication": {
"id": "proceedings/sbgames/2017/4846/0",
"title": "2017 16th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2013/0174/0/06607870",
"title": "A story generation mechanism based on the cooperation of micro/macro story techniques: As a module in the integrated narrative generation system",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2013/06607870/12OmNqFJhQy",
"parentPublication": {
"id": "proceedings/icis/2013/0174/0",
"title": "2013 IEEE/ACIS 12th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/digitel/2012/4663/0/4663a174",
"title": "Towards an Integrated Narrative Generation System Based on Structural Techniques and Generation Control",
"doi": null,
"abstractUrl": "/proceedings-article/digitel/2012/4663a174/12OmNvF83qF",
"parentPublication": {
"id": "proceedings/digitel/2012/4663/0",
"title": "Digital Game and Intelligent Toy Enhanced Learning, IEEE International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a254",
"title": "Narrative Grammar in 360",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a254/12OmNxzuMIV",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/digitel/2012/4663/0/4663a165",
"title": "Story Generation System Based on Propp Theory as a Mechanism in Narrative Generation System",
"doi": null,
"abstractUrl": "/proceedings-article/digitel/2012/4663a165/12OmNzT7Ovm",
"parentPublication": {
"id": "proceedings/digitel/2012/4663/0",
"title": "Digital Game and Intelligent Toy Enhanced Learning, IEEE International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2017/04/07489041",
"title": "Error Analysis in an Automated Narrative Information Extraction Pipeline",
"doi": null,
"abstractUrl": "/journal/ci/2017/04/07489041/13rRUwh80JU",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2014/02/06661342",
"title": "A Computational Model of Narrative Generation for Surprise Arousal",
"doi": null,
"abstractUrl": "/journal/ci/2014/02/06661342/13rRUxBJhxO",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2017/04/07519091",
"title": "Leveraging Intention Revision in Narrative Planning to Create Suspenseful Stories",
"doi": null,
"abstractUrl": "/journal/ci/2017/04/07519091/13rRUxCitLM",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a406",
"title": "Interactive Narrative Facial Expression Animation Generation by Intuitive Curve Drawing",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a406/1tnXNLPhC7K",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyO8tMO",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"acronym": "fg",
"groupId": "1000065",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwDj0Zc",
"doi": "10.1109/FG.2018.00125",
"title": "Fast Face and Saliency Aware Collage Creation for Mobile Phones",
"normalizedTitle": "Fast Face and Saliency Aware Collage Creation for Mobile Phones",
"abstract": "This demonstration is of an automated method for creating image collage in a mobile phone. The app creates semantically meaningful collages from images based on faces, image saliency and hybrid blending. The algorithm is designed for computational efficiency inorder for it run on a mobile device. Due to the increase in the use of social networks, users, are uploading images and videos from social events. Collage presents a useful crisper summary of the event. It is popular among users as evident from Layout from Instagram: Collage app, which has over 100 million downloads. A limitation of these apps is that they are dependent on the user for selecting the grid layout and optimal cropping of the images. Our smart collage over comes these limitations and to the best of our knowledge this is one of the first mobile based solutions, which considers the faces and the region importance and later merges the salient images automatically in a non-rigid grid layout. Other interesting works either have a semi-rigid [1] or a fully rigid grid based layout [2] or are desktop based [3].",
"abstracts": [
{
"abstractType": "Regular",
"content": "This demonstration is of an automated method for creating image collage in a mobile phone. The app creates semantically meaningful collages from images based on faces, image saliency and hybrid blending. The algorithm is designed for computational efficiency inorder for it run on a mobile device. Due to the increase in the use of social networks, users, are uploading images and videos from social events. Collage presents a useful crisper summary of the event. It is popular among users as evident from Layout from Instagram: Collage app, which has over 100 million downloads. A limitation of these apps is that they are dependent on the user for selecting the grid layout and optimal cropping of the images. Our smart collage over comes these limitations and to the best of our knowledge this is one of the first mobile based solutions, which considers the faces and the region importance and later merges the salient images automatically in a non-rigid grid layout. Other interesting works either have a semi-rigid [1] or a fully rigid grid based layout [2] or are desktop based [3].",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This demonstration is of an automated method for creating image collage in a mobile phone. The app creates semantically meaningful collages from images based on faces, image saliency and hybrid blending. The algorithm is designed for computational efficiency inorder for it run on a mobile device. Due to the increase in the use of social networks, users, are uploading images and videos from social events. Collage presents a useful crisper summary of the event. It is popular among users as evident from Layout from Instagram: Collage app, which has over 100 million downloads. A limitation of these apps is that they are dependent on the user for selecting the grid layout and optimal cropping of the images. Our smart collage over comes these limitations and to the best of our knowledge this is one of the first mobile based solutions, which considers the faces and the region importance and later merges the salient images automatically in a non-rigid grid layout. Other interesting works either have a semi-rigid [1] or a fully rigid grid based layout [2] or are desktop based [3].",
"fno": "233501a788",
"keywords": [
"Face Recognition",
"Mobile Computing",
"Social Networking Online",
"Fast Face",
"Saliency Aware Collage Creation",
"Mobile Phone",
"Image Collage",
"Image Saliency",
"Hybrid Blending",
"Computational Efficiency Inorder",
"Mobile Device",
"Social Networks",
"Social Events",
"Smart Collage",
"Mobile Based Solutions",
"Salient Images",
"Nonrigid Grid Layout",
"Face",
"Mobile Handsets",
"Layout",
"Conferences",
"Gesture Recognition",
"Social Network Services",
"Videos",
"Collage Creation",
"Face Aware"
],
"authors": [
{
"affiliation": null,
"fullName": "Love Mehta",
"givenName": "Love",
"surname": "Mehta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Abhinav Dhall",
"givenName": "Abhinav",
"surname": "Dhall",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-05-01T00:00:00",
"pubType": "proceedings",
"pages": "788-788",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-2335-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "233501a787",
"articleId": "12OmNzXWZLY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "233501a789",
"articleId": "12OmNASILS4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2016/8942/0/8942a112",
"title": "Sketch Based Picture-Collage Generation Using Evolutionary Computation",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a112/12OmNrJ11zk",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543752",
"title": "Mobile photo collage",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543752/12OmNvAiSJ6",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2018/2335/0/233501a774",
"title": "Landmark-Based 3D Face Reconstruction from an Arbitrary Number of Unconstrained Images",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a774/12OmNvxsSVK",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2016/2305/0/2305a140",
"title": "A System for Dynamically Generating Photo Collage Based on Aesthetics",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2016/2305a140/12OmNxGja3B",
"parentPublication": {
"id": "proceedings/nicoint/2016/2305/0",
"title": "2016 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08057796",
"title": "Narrative Collage of Image Collections by Scene Graph Recombination",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08057796/13rRUwI5TR6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/08/ttp2008081346",
"title": "Deformation Modeling for Robust 3D Face Matching",
"doi": null,
"abstractUrl": "/journal/tp/2008/08/ttp2008081346/13rRUyfbwrW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2021/3176/0/09667079",
"title": "GazeGrid: A Novel Interaction Method Based on Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09667079/1A6Boxo4Yyk",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600d719",
"title": "SoftCollage: A Differentiable Probabilistic Tree Generator for Image Collage",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600d719/1H0NizTiNUI",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10081386",
"title": "Image Collage on Arbitrary Shape via Shape-Aware Slicing and Optimization",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10081386/1LRbSaS1ClW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09540348",
"title": "Balance-Aware Grid Collage for Small Image Collections",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09540348/1wWCehU44hi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmHdPqdGUM",
"doi": "10.1109/ICCV48922.2021.00140",
"title": "Panoptic Narrative Grounding",
"normalizedTitle": "Panoptic Narrative Grounding",
"abstract": "This paper proposes Panoptic Narrative Grounding, a spatially fine and general formulation of the natural language visual grounding problem. We establish an experimental framework for the study of this new task, including new ground truth and metrics, and we propose a strong baseline method to serve as stepping stone for future work. We exploit the intrinsic semantic richness in an image by including panoptic categories, and we approach visual grounding at a fine-grained level by using segmentations. In terms of ground truth, we propose an algorithm to automatically transfer Localized Narratives annotations to specific regions in the panoptic segmentations of the MS COCO dataset. To guarantee the quality of our annotations, we take advantage of the semantic structure contained in WordNet to exclusively incorporate noun phrases that are grounded to a meaningfully related panoptic segmentation region. The proposed baseline achieves a performance of 55.4 absolute Average Recall points. This result is a suitable foundation to push the envelope further in the development of methods for Panoptic Narrative Grounding.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes Panoptic Narrative Grounding, a spatially fine and general formulation of the natural language visual grounding problem. We establish an experimental framework for the study of this new task, including new ground truth and metrics, and we propose a strong baseline method to serve as stepping stone for future work. We exploit the intrinsic semantic richness in an image by including panoptic categories, and we approach visual grounding at a fine-grained level by using segmentations. In terms of ground truth, we propose an algorithm to automatically transfer Localized Narratives annotations to specific regions in the panoptic segmentations of the MS COCO dataset. To guarantee the quality of our annotations, we take advantage of the semantic structure contained in WordNet to exclusively incorporate noun phrases that are grounded to a meaningfully related panoptic segmentation region. The proposed baseline achieves a performance of 55.4 absolute Average Recall points. This result is a suitable foundation to push the envelope further in the development of methods for Panoptic Narrative Grounding.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes Panoptic Narrative Grounding, a spatially fine and general formulation of the natural language visual grounding problem. We establish an experimental framework for the study of this new task, including new ground truth and metrics, and we propose a strong baseline method to serve as stepping stone for future work. We exploit the intrinsic semantic richness in an image by including panoptic categories, and we approach visual grounding at a fine-grained level by using segmentations. In terms of ground truth, we propose an algorithm to automatically transfer Localized Narratives annotations to specific regions in the panoptic segmentations of the MS COCO dataset. To guarantee the quality of our annotations, we take advantage of the semantic structure contained in WordNet to exclusively incorporate noun phrases that are grounded to a meaningfully related panoptic segmentation region. The proposed baseline achieves a performance of 55.4 absolute Average Recall points. This result is a suitable foundation to push the envelope further in the development of methods for Panoptic Narrative Grounding.",
"fno": "281200b344",
"keywords": [
"Measurement",
"Visualization",
"Image Segmentation",
"Computer Vision",
"Grounding",
"Annotations",
"Semantics",
"Vision Language",
"Scene Analysis And Understanding",
"Segmentation",
"Grouping And Shape"
],
"authors": [
{
"affiliation": "Universidad de los Andes,Center for Research and Formation in Artificial Intelligence,Colombia",
"fullName": "Cristina González",
"givenName": "Cristina",
"surname": "González",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidad de los Andes,Center for Research and Formation in Artificial Intelligence,Colombia",
"fullName": "Nicolás Ayobi",
"givenName": "Nicolás",
"surname": "Ayobi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidad de los Andes,Center for Research and Formation in Artificial Intelligence,Colombia",
"fullName": "Isabela Hernández",
"givenName": "Isabela",
"surname": "Hernández",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidad de los Andes,Center for Research and Formation in Artificial Intelligence,Colombia",
"fullName": "José Hernández",
"givenName": "José",
"surname": "Hernández",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google Research,Switzerland",
"fullName": "Jordi Pont-Tuset",
"givenName": "Jordi",
"surname": "Pont-Tuset",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidad de los Andes,Center for Research and Formation in Artificial Intelligence,Colombia",
"fullName": "Pablo Arbeláez",
"givenName": "Pablo",
"surname": "Arbeláez",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1344-1353",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1BmHdMgVFqo",
"name": "piccv202128120-09710546s1-mm_281200b344.zip",
"size": "1.58 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/piccv202128120-09710546s1-mm_281200b344.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "281200b335",
"articleId": "1BmEw56nOlq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200b354",
"articleId": "1BmHPJWN2tW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200k0108",
"title": "FloorPlanCAD: A Large-Scale CAD Drawing Dataset for Panoptic Symbol Spotting",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200k0108/1BmECxiwqgU",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600v1001",
"title": "Large-scale Video Panoptic Segmentation in the Wild: A Benchmark",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600v1001/1H0L8K6RLnW",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c456",
"title": "Semi-supervised Video Paragraph Grounding with Contrastive Encoder",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c456/1H1irywrCbm",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a301",
"title": "Panoptic NeRF: 3D-to-2D Label Transfer for Panoptic Urban Scene Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a301/1KYstcv8HRe",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300g371",
"title": "Modularized Textual Grounding for Counterfactual Resilience",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300g371/1gys27VXZUA",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800j856",
"title": "Video Panoptic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800j856/1m3nuV4vGBW",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700d852",
"title": "Boosting Monocular Depth with Panoptic Segmentation Maps",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700d852/1uqGrGgcTJu",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900i441",
"title": "Embracing Uncertainty: Decoupling and De-bias for Robust Temporal Grounding",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900i441/1yeJiSsAH1S",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900q6689",
"title": "Toward Joint Thing-and-Stuff Mining for Weakly Supervised Panoptic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900q6689/1yeK4v25kiY",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f481",
"title": "Part-aware Panoptic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f481/1yeLcwtrpBK",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H0NizTiNUI",
"doi": "10.1109/CVPR52688.2022.00371",
"title": "SoftCollage: A Differentiable Probabilistic Tree Generator for Image Collage",
"normalizedTitle": "SoftCollage: A Differentiable Probabilistic Tree Generator for Image Collage",
"abstract": "Image collage task aims to create an informative and visual-aesthetic visual summarization for an image collection. While several recent works exploit tree-based algorithm to preserve image content better, all of them resort to hand-crafted adjustment rules to optimize the collage tree structure, leading to the failure of fully exploring the structure space of collage tree. Our key idea is to soften the discrete tree structure space into a continuous probability space. We propose SoftCollage, a novel method that employs a neural-based differentiable probabilistic tree generator to produce the probability distribution of correlation-preserving collage tree conditioned on deep image feature, aspect ratio and canvas size. The differentiable characteristic allows us to formulate the tree-based collage generation as a differentiable process and directly exploit gradient to optimize the collage layout in the level of probability space in an end-to-end manner. To facilitate image collage research, we propose AIC, a large-scale public-available annotated dataset for image collage evaluation. Extensive experiments on the introduced dataset demonstrate the superior performance of the proposed method. Data and codes are available at https://github.com/ChineseYjh/SoftCollage.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Image collage task aims to create an informative and visual-aesthetic visual summarization for an image collection. While several recent works exploit tree-based algorithm to preserve image content better, all of them resort to hand-crafted adjustment rules to optimize the collage tree structure, leading to the failure of fully exploring the structure space of collage tree. Our key idea is to soften the discrete tree structure space into a continuous probability space. We propose SoftCollage, a novel method that employs a neural-based differentiable probabilistic tree generator to produce the probability distribution of correlation-preserving collage tree conditioned on deep image feature, aspect ratio and canvas size. The differentiable characteristic allows us to formulate the tree-based collage generation as a differentiable process and directly exploit gradient to optimize the collage layout in the level of probability space in an end-to-end manner. To facilitate image collage research, we propose AIC, a large-scale public-available annotated dataset for image collage evaluation. Extensive experiments on the introduced dataset demonstrate the superior performance of the proposed method. Data and codes are available at https://github.com/ChineseYjh/SoftCollage.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image collage task aims to create an informative and visual-aesthetic visual summarization for an image collection. While several recent works exploit tree-based algorithm to preserve image content better, all of them resort to hand-crafted adjustment rules to optimize the collage tree structure, leading to the failure of fully exploring the structure space of collage tree. Our key idea is to soften the discrete tree structure space into a continuous probability space. We propose SoftCollage, a novel method that employs a neural-based differentiable probabilistic tree generator to produce the probability distribution of correlation-preserving collage tree conditioned on deep image feature, aspect ratio and canvas size. The differentiable characteristic allows us to formulate the tree-based collage generation as a differentiable process and directly exploit gradient to optimize the collage layout in the level of probability space in an end-to-end manner. To facilitate image collage research, we propose AIC, a large-scale public-available annotated dataset for image collage evaluation. Extensive experiments on the introduced dataset demonstrate the superior performance of the proposed method. Data and codes are available at https://github.com/ChineseYjh/SoftCollage.",
"fno": "694600d719",
"keywords": [
"Image Representation",
"Optimisation",
"Probability",
"Trees Mathematics",
"Image Collage Task Aims",
"Informative Summarization",
"Visual Aesthetic Visual Summarization",
"Image Collection",
"Tree Based Algorithm",
"Image Content",
"Hand Crafted Adjustment Rules",
"Collage Tree Structure",
"Discrete Tree Structure Space",
"Continuous Probability Space",
"Neural Based Differentiable Probabilistic Tree Generator",
"Probability Distribution",
"Correlation Preserving Collage Tree",
"Deep Image Feature",
"Differentiable Characteristic",
"Tree Based Collage Generation",
"Differentiable Process",
"Collage Layout",
"Image Collage Research",
"Image Collage Evaluation",
"Visualization",
"Computational Modeling",
"Layout",
"Memory Management",
"Probabilistic Logic",
"Generators",
"Probability Distribution"
],
"authors": [
{
"affiliation": "School of Software, BNRist, Tsinghua University,Beijing,China",
"fullName": "Jiahao Yu",
"givenName": "Jiahao",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Software, BNRist, Tsinghua University,Beijing,China",
"fullName": "Li Chen",
"givenName": "Li",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Software, BNRist, Tsinghua University,Beijing,China",
"fullName": "Mingrui Zhang",
"givenName": "Mingrui",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kuaishou Technology,Beijing,China",
"fullName": "Mading Li",
"givenName": "Mading",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "3719-3728",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H0NiwmatuU",
"name": "pcvpr202269460-09879604s1-mm_694600d719.zip",
"size": "5.71 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879604s1-mm_694600d719.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600d708",
"articleId": "1H0N399JHcA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600d729",
"articleId": "1H1m1wIMuNG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2016/8942/0/8942a112",
"title": "Sketch Based Picture-Collage Generation Using Evolutionary Computation",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a112/12OmNrJ11zk",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbr-lars-r/2016/3656/0/07783529",
"title": "A New Robot Path Planning Method Based on Probabilistic Foam",
"doi": null,
"abstractUrl": "/proceedings-article/sbr-lars-r/2016/07783529/12OmNwCaCz1",
"parentPublication": {
"id": "proceedings/sbr-lars-r/2016/3656/0",
"title": "2016 XIII Latin-American Robotics Symposium and IV Brazilian Robotics Symposium (LARS/SBR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2018/2335/0/233501a788",
"title": "Fast Face and Saliency Aware Collage Creation for Mobile Phones",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a788/12OmNwDj0Zc",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08057796",
"title": "Narrative Collage of Image Collections by Scene Graph Recombination",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08057796/13rRUwI5TR6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539319",
"title": "Probabilistic Graph Layout for Uncertain Network Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539319/13rRUwh80Hi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10081386",
"title": "Image Collage on Arbitrary Shape via Shape-Aware Slicing and Optimization",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10081386/1LRbSaS1ClW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a454",
"title": "Trcollage: Efficient Image Collage Using Tree-Based Layer Reordering",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a454/1ap5A1IuXIY",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413273",
"title": "PROPEL: Probabilistic Parametric Regression Loss for Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413273/1tmjYz9VeZq",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2021/9184/0/918400c411",
"title": "Prefix-Graph: A Versatile Log Parsing Approach Merging Prefix Tree with Probabilistic Graph",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2021/918400c411/1uGXyu8tgwE",
"parentPublication": {
"id": "proceedings/icde/2021/9184/0",
"title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09540348",
"title": "Balance-Aware Grid Collage for Small Image Collections",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09540348/1wWCehU44hi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3ndHyh4oE",
"doi": "10.1109/CVPR42600.2020.00526",
"title": "Semantic Image Manipulation Using Scene Graphs",
"normalizedTitle": "Semantic Image Manipulation Using Scene Graphs",
"abstract": "Image manipulation can be considered a special case of image generation where the image to be produced is a modification of an existing image. Image generation and manipulation have been, for the most part, tasks that operate on raw pixels. However, the remarkable progress in learning rich image and object representations has opened the way for tasks such as text-to-image or layout-to-image generation that are mainly driven by semantics. In our work, we address the novel problem of image manipulation from scene graphs, in which a user can edit images by merely applying changes in the nodes or edges of a semantic graph that is generated from the image. Our goal is to encode image information in a given constellation and from there on generate new constellations, such as replacing objects or even changing relationships between objects, while respecting the semantics and style from the original image. We introduce a spatio-semantic scene graph network that does not require direct supervision for constellation changes or image edits. This makes it possible to train the system from existing real-world datasets with no additional annotation effort.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Image manipulation can be considered a special case of image generation where the image to be produced is a modification of an existing image. Image generation and manipulation have been, for the most part, tasks that operate on raw pixels. However, the remarkable progress in learning rich image and object representations has opened the way for tasks such as text-to-image or layout-to-image generation that are mainly driven by semantics. In our work, we address the novel problem of image manipulation from scene graphs, in which a user can edit images by merely applying changes in the nodes or edges of a semantic graph that is generated from the image. Our goal is to encode image information in a given constellation and from there on generate new constellations, such as replacing objects or even changing relationships between objects, while respecting the semantics and style from the original image. We introduce a spatio-semantic scene graph network that does not require direct supervision for constellation changes or image edits. This makes it possible to train the system from existing real-world datasets with no additional annotation effort.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image manipulation can be considered a special case of image generation where the image to be produced is a modification of an existing image. Image generation and manipulation have been, for the most part, tasks that operate on raw pixels. However, the remarkable progress in learning rich image and object representations has opened the way for tasks such as text-to-image or layout-to-image generation that are mainly driven by semantics. In our work, we address the novel problem of image manipulation from scene graphs, in which a user can edit images by merely applying changes in the nodes or edges of a semantic graph that is generated from the image. Our goal is to encode image information in a given constellation and from there on generate new constellations, such as replacing objects or even changing relationships between objects, while respecting the semantics and style from the original image. We introduce a spatio-semantic scene graph network that does not require direct supervision for constellation changes or image edits. This makes it possible to train the system from existing real-world datasets with no additional annotation effort.",
"fno": "716800f212",
"keywords": [
"Graph Theory",
"Image Coding",
"Image Representation",
"Learning Artificial Intelligence",
"Semantic Image Manipulation",
"Scene Graphs",
"Image Learning",
"Semantic Graph",
"Image Information Encoding",
"Spatio Semantic Scene Graph Network",
"Image Edits",
"Image Generation",
"Object Representations",
"Semantics",
"Image Generation",
"Task Analysis",
"Visualization",
"Image Edge Detection",
"Image Reconstruction",
"Layout"
],
"authors": [
{
"affiliation": "Technische Universität München",
"fullName": "Helisa Dhamo",
"givenName": "Helisa",
"surname": "Dhamo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität München",
"fullName": "Azade Farshad",
"givenName": "Azade",
"surname": "Farshad",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität München; University of Oxford",
"fullName": "Iro Laina",
"givenName": "Iro",
"surname": "Laina",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität München; Johns Hopkins University",
"fullName": "Nassir Navab",
"givenName": "Nassir",
"surname": "Navab",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Johns Hopkins University",
"fullName": "Gregory D. Hager",
"givenName": "Gregory D.",
"surname": "Hager",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität München; Google",
"fullName": "Federico Tombari",
"givenName": "Federico",
"surname": "Tombari",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Oxford",
"fullName": "Christian Rupprecht",
"givenName": "Christian",
"surname": "Rupprecht",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "5212-5221",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800f202",
"articleId": "1m3o3o90ONa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800f222",
"articleId": "1m3nCSczNT2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icacc/2013/5033/0/06686375",
"title": "Feature Based Image Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/icacc/2013/06686375/12OmNCbU2Q6",
"parentPublication": {
"id": "proceedings/icacc/2013/5033/0",
"title": "2013 Third International Conference on Advances in Computing and Communications (ICACC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000b219",
"title": "Image Generation from Scene Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b219/17D45XwUAKS",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3749",
"title": "Image Shape Manipulation from a Single Augmented Training Sample",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3749/1BmKNCO62iI",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/03/09794591",
"title": "Semantic Layout Manipulation With High-Resolution Sparse Attention",
"doi": null,
"abstractUrl": "/journal/tp/2023/03/09794591/1Eb14834UiQ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859841",
"title": "High-Quality Image Generation from Scene Graphs with Transformer",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859841/1G9DSypmuNa",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5596",
"title": "Scene Graph Expansion for Semantics-Guided Image Outpainting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5596/1H0OzDV0MZa",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300e480",
"title": "FiNet: Compatible and Diverse Fashion Image Inpainting",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300e480/1hQqkg1bJvi",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f173",
"title": "SketchyCOCO: Image Generation From Freehand Scene Sketches",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f173/1m3nEX4Ksj6",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h877",
"title": "ManiGAN: Text-Guided Image Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h877/1m3nOFPFzb2",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/11/09547753",
"title": "Exploiting Deep Generative Prior for Versatile Image Restoration and Manipulation",
"doi": null,
"abstractUrl": "/journal/tp/2022/11/09547753/1x9Tvtcuj5e",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeHGyRsuys",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeJbBelsbe",
"doi": "10.1109/CVPR46437.2021.00922",
"title": "Semantic Palette: Guiding Scene Generation with Class Proportions",
"normalizedTitle": "Semantic Palette: Guiding Scene Generation with Class Proportions",
"abstract": "Despite the recent progress of generative adversarial networks (GANs) at synthesizing photo-realistic images, producing complex urban scenes remains a challenging problem. Previous works break down scene generation into two consecutive phases: unconditional semantic layout synthesis and image synthesis conditioned on layouts. In this work, we propose to condition layout generation as well for higher semantic control: given a vector of class proportions, we generate layouts with matching composition. To this end, we introduce a conditional framework with novel architecture designs and learning objectives, which effectively accommodates class proportions to guide the scene generation process. The proposed architecture also allows partial layout editing with interesting applications. Thanks to the semantic control, we can produce layouts close to the real distribution, helping enhance the whole scene generation process. On different metrics and urban scene benchmarks, our models outperform existing baselines. Moreover, we demonstrate the merit of our approach for data augmentation: semantic segmenters trained on real layout-image pairs along with additional ones generated by our approach outperform models only trained on real pairs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Despite the recent progress of generative adversarial networks (GANs) at synthesizing photo-realistic images, producing complex urban scenes remains a challenging problem. Previous works break down scene generation into two consecutive phases: unconditional semantic layout synthesis and image synthesis conditioned on layouts. In this work, we propose to condition layout generation as well for higher semantic control: given a vector of class proportions, we generate layouts with matching composition. To this end, we introduce a conditional framework with novel architecture designs and learning objectives, which effectively accommodates class proportions to guide the scene generation process. The proposed architecture also allows partial layout editing with interesting applications. Thanks to the semantic control, we can produce layouts close to the real distribution, helping enhance the whole scene generation process. On different metrics and urban scene benchmarks, our models outperform existing baselines. Moreover, we demonstrate the merit of our approach for data augmentation: semantic segmenters trained on real layout-image pairs along with additional ones generated by our approach outperform models only trained on real pairs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Despite the recent progress of generative adversarial networks (GANs) at synthesizing photo-realistic images, producing complex urban scenes remains a challenging problem. Previous works break down scene generation into two consecutive phases: unconditional semantic layout synthesis and image synthesis conditioned on layouts. In this work, we propose to condition layout generation as well for higher semantic control: given a vector of class proportions, we generate layouts with matching composition. To this end, we introduce a conditional framework with novel architecture designs and learning objectives, which effectively accommodates class proportions to guide the scene generation process. The proposed architecture also allows partial layout editing with interesting applications. Thanks to the semantic control, we can produce layouts close to the real distribution, helping enhance the whole scene generation process. On different metrics and urban scene benchmarks, our models outperform existing baselines. Moreover, we demonstrate the merit of our approach for data augmentation: semantic segmenters trained on real layout-image pairs along with additional ones generated by our approach outperform models only trained on real pairs.",
"fno": "450900j338",
"keywords": [
"Feature Extraction",
"Image Classification",
"Image Matching",
"Image Segmentation",
"Integrated Circuit Layout",
"Learning Artificial Intelligence",
"Natural Scenes",
"Realistic Images",
"Video Signal Processing",
"Guiding Scene Generation",
"Class Proportions",
"Generative Adversarial Networks",
"Photo Realistic Images",
"Complex Urban Scenes",
"Unconditional Semantic Layout Synthesis",
"Image Synthesis",
"Condition Layout Generation",
"Higher Semantic Control",
"Conditional Framework",
"Architecture Designs",
"Learning Objectives",
"Scene Generation Process",
"Partial Layout Editing",
"Urban Scene Benchmarks",
"Semantic Segmenters",
"Layout Image Pairs",
"Semantic Palette",
"Training",
"Measurement",
"Image Segmentation",
"Image Synthesis",
"Semantics",
"Layout",
"Process Control"
],
"authors": [
{
"affiliation": "Inria",
"fullName": "Guillaume Le Moing",
"givenName": "Guillaume",
"surname": "Le Moing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Valeo.ai",
"fullName": "Tuan-Hung Vu",
"givenName": "Tuan-Hung",
"surname": "Vu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Valeo.ai",
"fullName": "Himalaya Jain",
"givenName": "Himalaya",
"surname": "Jain",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Valeo.ai",
"fullName": "Patrick Pérez",
"givenName": "Patrick",
"surname": "Pérez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Valeo.ai",
"fullName": "Matthieu Cord",
"givenName": "Matthieu",
"surname": "Cord",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "9338-9346",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4509-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeJbvMhcgo",
"name": "pcvpr202145090-09577811s1-mm_450900j338.zip",
"size": "8.11 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09577811s1-mm_450900j338.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "450900j326",
"articleId": "1yeJjJbriWQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "450900j347",
"articleId": "1yeK5XhoiiI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200n3799",
"title": "Image Synthesis from Layout with Locality-Aware Mask Adaption",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3799/1BmGJmzmBTq",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859841",
"title": "High-Quality Image Generation from Scene Graphs with Transformer",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859841/1G9DSypmuNa",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8092",
"title": "Text-to-Image Synthesis based on Object-Guided Joint-Decoding Transformer",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8092/1H1l7q04Gac",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c252",
"title": "3D Scene Painting via Semantic Image Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c252/1H1lSPqCX04",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600h773",
"title": "Interactive Image Synthesis with Panoptic Layout Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600h773/1H1mlrOB1Wo",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300j894",
"title": "LayoutVAE: Stochastic Scene Layout Generation From a Label Set",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300j894/1hQqjMNDJny",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09128027",
"title": "Motion Planning for Convertible Indoor Scene Layout Design",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09128027/1l3unTAaNuE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d753",
"title": "End-to-End Optimization of Scene Layout",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d753/1m3ooUhHlVC",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a902",
"title": "Learning Semantic-Aware Dynamics for Video Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a902/1yeITpuAS7m",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d731",
"title": "LayoutTransformer: Scene Layout Generation with Conceptual and Spatial Diversity",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d731/1yeKJqXSUh2",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCmpcNk",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBKmXnI",
"doi": "10.1109/VISUAL.2005.1532863",
"title": "Illustrative rendering techniques for visualization: Future of visualization or just another technique?",
"normalizedTitle": "Illustrative rendering techniques for visualization: Future of visualization or just another technique?",
"abstract": "Illustrative rendering, often also depicted as non-photorealistic rendering or stylized rendering, employs abstraction techniques to convey the relevant information, and de-emphasize less important details. The question remains how this abstraction process is guided and in particular how can we ensure that relevant information is maintained. Consequently, research on illustrative rendering needs to address how the information is perceived by the human observer, next to the investigation of algorithmic aspects. In this panel, we discuss various aspects on this topic. Kwan-Liu Ma discusses how illustrative rendering can be used in scientific visualization, and Bernhard Preim explores its use for the visualization in the medical imaging domain. Perception aspects are presented by Victoria Interrante. A different perspective, if illustrative rendering is useful for typical visualization problems, is added by Hans Hagen.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Illustrative rendering, often also depicted as non-photorealistic rendering or stylized rendering, employs abstraction techniques to convey the relevant information, and de-emphasize less important details. The question remains how this abstraction process is guided and in particular how can we ensure that relevant information is maintained. Consequently, research on illustrative rendering needs to address how the information is perceived by the human observer, next to the investigation of algorithmic aspects. In this panel, we discuss various aspects on this topic. Kwan-Liu Ma discusses how illustrative rendering can be used in scientific visualization, and Bernhard Preim explores its use for the visualization in the medical imaging domain. Perception aspects are presented by Victoria Interrante. A different perspective, if illustrative rendering is useful for typical visualization problems, is added by Hans Hagen.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Illustrative rendering, often also depicted as non-photorealistic rendering or stylized rendering, employs abstraction techniques to convey the relevant information, and de-emphasize less important details. The question remains how this abstraction process is guided and in particular how can we ensure that relevant information is maintained. Consequently, research on illustrative rendering needs to address how the information is perceived by the human observer, next to the investigation of algorithmic aspects. In this panel, we discuss various aspects on this topic. Kwan-Liu Ma discusses how illustrative rendering can be used in scientific visualization, and Bernhard Preim explores its use for the visualization in the medical imaging domain. Perception aspects are presented by Victoria Interrante. A different perspective, if illustrative rendering is useful for typical visualization problems, is added by Hans Hagen.",
"fno": "01532863",
"keywords": [
"Visualization",
"Rendering Computer Graphics",
"Biomedical Imaging",
"Computer Science",
"Abdomen",
"Colon",
"Skeleton",
"Skin",
"Skull",
"Head"
],
"authors": [
{
"affiliation": null,
"fullName": "D. Bartz",
"givenName": "D.",
"surname": "Bartz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "H. Hagen",
"givenName": "H.",
"surname": "Hagen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "V. Interrante",
"givenName": "V.",
"surname": "Interrante",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kwan-Liu Ma",
"givenName": null,
"surname": "Kwan-Liu Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "B. Preim",
"givenName": "B.",
"surname": "Preim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2005-01-01T00:00:00",
"pubType": "proceedings",
"pages": "715,716,717,718",
"year": "2005",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "27660078",
"articleId": "12OmNvAiSkq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "27660079",
"articleId": "12OmNBDQbfz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgrapi/2011/4548/0/4548a101",
"title": "Illustrative Volume Visualization for Unstructured Meshes Based on Photic Extremum Lines",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2011/4548a101/12OmNy5R3zz",
"parentPublication": {
"id": "proceedings/sibgrapi/2011/4548/0",
"title": "2011 24th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720357",
"title": "Importance-Aware Composition for Illustrative Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720357/12OmNz5apMR",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2013/4797/0/06596137",
"title": "Illustrative visualization of cardiac and aortic blood flow from 4D MRI data",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2013/06596137/12OmNzC5SHi",
"parentPublication": {
"id": "proceedings/pacificvis/2013/4797/0",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bmei/2008/3118/1/3118a341",
"title": "Perception-aware Depth Cueing for Illustrative Vascular Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/bmei/2008/3118a341/12OmNzvhvKm",
"parentPublication": {
"id": "proceedings/bmei/2008/3118/1",
"title": "2008 International Conference on Biomedical Engineering and Informatics (BMEI 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061299",
"title": "Depth-Dependent Halos: Illustrative Rendering of Dense Line Data",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061299/13rRUEgs2LX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2003/02/v0127",
"title": "Illustrative Interactive Stipple Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2003/02/v0127/13rRUIIVlcA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2007/03/mcg2007030048",
"title": "Interactive Illustrative Rendering on Mobile Devices",
"doi": null,
"abstractUrl": "/magazine/cg/2007/03/mcg2007030048/13rRUNvyanr",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/04/ttg2010040571",
"title": "Illustrative Volume Visualization Using GPU-Based Particle Systems",
"doi": null,
"abstractUrl": "/journal/tg/2010/04/ttg2010040571/13rRUwgyOjh",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/06/ttg2008061739",
"title": "Color Design for Illustrative Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2008/06/ttg2008061739/13rRUxE04tv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCmpcNk",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzTH0TG",
"doi": "10.1109/VIS.2005.120",
"title": "Visualization in the Einstein Year 2005: A Case Study on Explanatory and Illustrative Visualization of Relativity and Astrophysics",
"normalizedTitle": "Visualization in the Einstein Year 2005: A Case Study on Explanatory and Illustrative Visualization of Relativity and Astrophysics",
"abstract": "In this application paper, we report on over fifteen years of experience with relativistic and astrophysical visualization, which has been culminating in a substantial engagement for visualization in the Einstein Year 2005 - the 100th anniversary of Einstein's publications on special relativity, the photoelectric effect, and Brownian motion. This paper focuses on explanatory and illustrative visualizations used to communicate aspects of the difficult theories of special and general relativity, their geometric structure, and of the related fields of cosmology and astrophysics. We discuss visualization strategies, motivated by physics education and didactics of mathematics, and describe what kind of visualization methods have proven to be useful for different types of media, such as still images in popular-science magazines, film contributions to TV shows, oral presentations, or interactive museum installations. Although our visualization tools build upon existing methods and implementations, these techniques have been improved by several novel technical contributions like image-based special relativistic rendering on GPUs, an extension of general relativistic ray tracing to manifolds described by multiple charts, GPU-based interactive visualization of gravitational light deflection, as well as planetary terrain rendering. The usefulness and effectiveness of our visualizations are demonstrated by reporting on experiences with, and feedback from, recipients of visualizations and collaborators.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this application paper, we report on over fifteen years of experience with relativistic and astrophysical visualization, which has been culminating in a substantial engagement for visualization in the Einstein Year 2005 - the 100th anniversary of Einstein's publications on special relativity, the photoelectric effect, and Brownian motion. This paper focuses on explanatory and illustrative visualizations used to communicate aspects of the difficult theories of special and general relativity, their geometric structure, and of the related fields of cosmology and astrophysics. We discuss visualization strategies, motivated by physics education and didactics of mathematics, and describe what kind of visualization methods have proven to be useful for different types of media, such as still images in popular-science magazines, film contributions to TV shows, oral presentations, or interactive museum installations. Although our visualization tools build upon existing methods and implementations, these techniques have been improved by several novel technical contributions like image-based special relativistic rendering on GPUs, an extension of general relativistic ray tracing to manifolds described by multiple charts, GPU-based interactive visualization of gravitational light deflection, as well as planetary terrain rendering. The usefulness and effectiveness of our visualizations are demonstrated by reporting on experiences with, and feedback from, recipients of visualizations and collaborators.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this application paper, we report on over fifteen years of experience with relativistic and astrophysical visualization, which has been culminating in a substantial engagement for visualization in the Einstein Year 2005 - the 100th anniversary of Einstein's publications on special relativity, the photoelectric effect, and Brownian motion. This paper focuses on explanatory and illustrative visualizations used to communicate aspects of the difficult theories of special and general relativity, their geometric structure, and of the related fields of cosmology and astrophysics. We discuss visualization strategies, motivated by physics education and didactics of mathematics, and describe what kind of visualization methods have proven to be useful for different types of media, such as still images in popular-science magazines, film contributions to TV shows, oral presentations, or interactive museum installations. Although our visualization tools build upon existing methods and implementations, these techniques have been improved by several novel technical contributions like image-based special relativistic rendering on GPUs, an extension of general relativistic ray tracing to manifolds described by multiple charts, GPU-based interactive visualization of gravitational light deflection, as well as planetary terrain rendering. The usefulness and effectiveness of our visualizations are demonstrated by reporting on experiences with, and feedback from, recipients of visualizations and collaborators.",
"fno": "27660074",
"keywords": [
"Visualization",
"Explanatory Computer Graphics",
"Illustrative Visualization",
"Special Relativity",
"General Relativity",
"Astrophysics",
"Visualization Of Mathematics",
"Terrain Rendering"
],
"authors": [
{
"affiliation": "Simon Fraser University",
"fullName": "Daniel Weiskopf",
"givenName": "Daniel",
"surname": "Weiskopf",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Marc Borchers",
"givenName": "Marc",
"surname": "Borchers",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Stuttgart",
"fullName": "Thomas Ertl",
"givenName": "Thomas",
"surname": "Ertl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Stuttgart",
"fullName": "Martin Falk",
"givenName": "Martin",
"surname": "Falk",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Oliver Fechtig",
"givenName": "Oliver",
"surname": "Fechtig",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Regine Frank",
"givenName": "Regine",
"surname": "Frank",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Frank Grave",
"givenName": "Frank",
"surname": "Grave",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Andreas King",
"givenName": "Andreas",
"surname": "King",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Ute Kraus",
"givenName": "Ute",
"surname": "Kraus",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Thomas Muller",
"givenName": "Thomas",
"surname": "Muller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Hans-Peter Nollert",
"givenName": "Hans-Peter",
"surname": "Nollert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Isabel Rica Mendez",
"givenName": "Isabel Rica",
"surname": "Mendez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Hanns Ruder",
"givenName": "Hanns",
"surname": "Ruder",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Corvin Zahn",
"givenName": "Corvin",
"surname": "Zahn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tubingen",
"fullName": "Michael Zatloukal",
"givenName": "Michael",
"surname": "Zatloukal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Stuttgart",
"fullName": "Tobias Schafhitzel",
"givenName": "Tobias",
"surname": "Schafhitzel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Historisches Museum Bern",
"fullName": "Sonja Schar",
"givenName": "Sonja",
"surname": "Schar",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-10-01T00:00:00",
"pubType": "proceedings",
"pages": "74",
"year": "2005",
"issn": null,
"isbn": "0-7803-9462-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01532858",
"articleId": "12OmNscxjaI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "27660075",
"articleId": "12OmNqI04RI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgiv/2007/2928/0/29280109",
"title": "Designing a Computer Game to Teach Einstein's Theory of Relativity",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2007/29280109/12OmNBubOSk",
"parentPublication": {
"id": "proceedings/cgiv/2007/2928/0",
"title": "Computer Graphics, Imaging and Visualisation (CGIV 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2010/4166/0/4166a035",
"title": "Visual Computer Game Features for Teaching Relativity",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2010/4166a035/12OmNCwCLuJ",
"parentPublication": {
"id": "proceedings/cgiv/2010/4166/0",
"title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759468",
"title": "A carom billiard to understand special relativity",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759468/12OmNqJ8tn2",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2000/6478/0/64780051",
"title": "Real-World Relativity: Image-Based Special Relativistic Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780051/12OmNrJAdYh",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532845",
"title": "Visualization in the Einstein Year 2005: a case study on explanatory and illustrative visualization of relativity and astrophysics",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532845/12OmNvlxJsW",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/1990/2056/0/00130108",
"title": "Time dilation visualization in relativity",
"doi": null,
"abstractUrl": "/proceedings-article/sc/1990/00130108/12OmNxFJXs6",
"parentPublication": {
"id": "proceedings/sc/1990/2056/0",
"title": "SC Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/1989/341/0/05348962",
"title": "Visualizing relativistic effects in spacetime",
"doi": null,
"abstractUrl": "/proceedings-article/sc/1989/05348962/12OmNyNQSB6",
"parentPublication": {
"id": "proceedings/sc/1989/341/0",
"title": "Proceedings of the 1989 ACM/IEEE Conference on Supercomputing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2011/06/mcs2011060064",
"title": "General-Relativistic Visualization",
"doi": null,
"abstractUrl": "/magazine/cs/2011/06/mcs2011060064/13rRUIJuxsx",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/04/v0522",
"title": "Explanatory and Illustrative Visualization of Special and General Relativity",
"doi": null,
"abstractUrl": "/journal/tg/2006/04/v0522/13rRUwjXZS0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2011/04/mcs2011040085",
"title": "Special-Relativistic Visualization",
"doi": null,
"abstractUrl": "/magazine/cs/2011/04/mcs2011040085/13rRUx0ge9P",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKir9",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WK5AlJ",
"doi": "10.1109/iV.2018.00076",
"title": "Concept as a Bridge between Abstraction and Concretization in Design Knowledge Visualization",
"normalizedTitle": "Concept as a Bridge between Abstraction and Concretization in Design Knowledge Visualization",
"abstract": "A framework for concept processing is introduced and discussed. It comprises the major phases of concept development from raw data into final products. It is enhanced by the main tasks needed to proceed from one stage into the next and by the main areas of focus in each phase. Concept derivation represents the core of the concept processing framework. Consequently, it is represented as the main nexus between knowledge abstraction and concretization in design. Eight methods of concept derivation are described. In addition, eight elements of concept translation into tangible design products are presented. The concept generation and translation process is illustrated by an example of framework implementation. The scope of the research is architectural design, but many components of the framework may be applicable to other design fields.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A framework for concept processing is introduced and discussed. It comprises the major phases of concept development from raw data into final products. It is enhanced by the main tasks needed to proceed from one stage into the next and by the main areas of focus in each phase. Concept derivation represents the core of the concept processing framework. Consequently, it is represented as the main nexus between knowledge abstraction and concretization in design. Eight methods of concept derivation are described. In addition, eight elements of concept translation into tangible design products are presented. The concept generation and translation process is illustrated by an example of framework implementation. The scope of the research is architectural design, but many components of the framework may be applicable to other design fields.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A framework for concept processing is introduced and discussed. It comprises the major phases of concept development from raw data into final products. It is enhanced by the main tasks needed to proceed from one stage into the next and by the main areas of focus in each phase. Concept derivation represents the core of the concept processing framework. Consequently, it is represented as the main nexus between knowledge abstraction and concretization in design. Eight methods of concept derivation are described. In addition, eight elements of concept translation into tangible design products are presented. The concept generation and translation process is illustrated by an example of framework implementation. The scope of the research is architectural design, but many components of the framework may be applicable to other design fields.",
"fno": "720200a407",
"keywords": [
"Data Visualisation",
"Human Computer Interaction",
"Systems Analysis",
"Knowledge Concretization",
"Design Products",
"Architectural Design",
"Translation Process",
"Concept Generation",
"Concept Translation",
"Knowledge Abstraction",
"Concept Processing Framework",
"Concept Derivation",
"Concept Development",
"Design Knowledge Visualization",
"Buildings",
"Cognition",
"Data Visualization",
"Bridges",
"Visualization",
"Task Analysis",
"Product Design",
"Knowledge Visualization",
"Concept Generation",
"Concept Translation",
"Design Cognition",
"Ideation",
"Design Process",
"Idea Materialization"
],
"authors": [
{
"affiliation": null,
"fullName": "Buthayna Eilouti",
"givenName": "Buthayna",
"surname": "Eilouti",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-07-01T00:00:00",
"pubType": "proceedings",
"pages": "407-412",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7202-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "720200a401",
"articleId": "17D45XfSETg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "720200a413",
"articleId": "17D45WWzW4E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2008/3268/0/3268a409",
"title": "Cluster-Based Visualization of Concept Associations",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a409/12OmNBBhN7P",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icre/1994/5480/0/00292405",
"title": "The concept of operations: the bridge from operational requirements to technical specifications",
"doi": null,
"abstractUrl": "/proceedings-article/icre/1994/00292405/12OmNBpVQ04",
"parentPublication": {
"id": "proceedings/icre/1994/5480/0",
"title": "Proceedings of IEEE International Conference on Requirements Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/jcdl/2017/3861/0/07991615",
"title": "TouchDoc: A Tool to Bridge the Gap between Physical and Digital Libraries",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2017/07991615/12OmNx7XH5d",
"parentPublication": {
"id": "proceedings/jcdl/2017/3861/0",
"title": "2017 ACM/IEEE Joint Conference on Digital Libraries (JCDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2014/2874/0/2874a352",
"title": "Visualization for Visual Analytics: Micro-visualization, Abstraction, and Physical Appeal",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2014/2874a352/12OmNySG3Oy",
"parentPublication": {
"id": "proceedings/pacificvis/2014/2874/0",
"title": "2014 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/1995/7119/0/71190012",
"title": "FunZ Designs -- A Bridge Between Z Specifications and Haskell Implementations",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/1995/71190012/12OmNzxyiCS",
"parentPublication": {
"id": "proceedings/compsac/1995/7119/0",
"title": "Proceedings Nineteenth Annual International Computer Software and Applications Conference (COMPSAC'95)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2002/08/i1075",
"title": "Discovering Useful Concept Prototypes for Classification Based on Filtering and Abstraction",
"doi": null,
"abstractUrl": "/journal/tp/2002/08/i1075/13rRUNvyalY",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08025425",
"title": "Pondering the Concept of Abstraction in (Illustrative) Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08025425/13rRUxlgxTt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809846",
"title": "Estimating Color-Concept Associations from Image Statistics",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809846/1cHEoEeTId2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/06/08864010",
"title": "A Provenance Task Abstraction Framework",
"doi": null,
"abstractUrl": "/magazine/cg/2019/06/08864010/1e0YpvcVR7y",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a620",
"title": "Immaterial Architecture: Understanding Visualization Through the Lifecycle of a Building",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a620/1rSRamQHMJO",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKipP",
"title": "2016 19th IEEE Intl Conference on Computational Science and Engineering (CSE), IEEE 14th Intl Conference on Embedded and Ubiquitous Computing (EUC), and 15th Intl Symposium on Distributed Computing and Applications for Business Engineering (DCABES)",
"acronym": "cse-euc",
"groupId": "1002115",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45Wda7h4",
"doi": "10.1109/CSE-EUC-DCABES.2016.262",
"title": "Using OpenDX to Teach the Concept of Visualization Pipeline",
"normalizedTitle": "Using OpenDX to Teach the Concept of Visualization Pipeline",
"abstract": "Visualization of experimental data and results of numerical simulations belongs to most basic skills that need to be mastered by students at undergraduate, graduate and postgraduate levels. When faced with many visualization techniques and available tools, deep understanding of the fundamental concepts, such as visualization pipeline, is the necessary foundation for using data visualization effectively. The article shows how concept of pipeline processing can be explained to students and how the tool such as OpenDX, with its visual programming capabilities, can be used to teach basic visualization concepts and techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visualization of experimental data and results of numerical simulations belongs to most basic skills that need to be mastered by students at undergraduate, graduate and postgraduate levels. When faced with many visualization techniques and available tools, deep understanding of the fundamental concepts, such as visualization pipeline, is the necessary foundation for using data visualization effectively. The article shows how concept of pipeline processing can be explained to students and how the tool such as OpenDX, with its visual programming capabilities, can be used to teach basic visualization concepts and techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visualization of experimental data and results of numerical simulations belongs to most basic skills that need to be mastered by students at undergraduate, graduate and postgraduate levels. When faced with many visualization techniques and available tools, deep understanding of the fundamental concepts, such as visualization pipeline, is the necessary foundation for using data visualization effectively. The article shows how concept of pipeline processing can be explained to students and how the tool such as OpenDX, with its visual programming capabilities, can be used to teach basic visualization concepts and techniques.",
"fno": "07982324",
"keywords": [
"Data Visualization",
"Pipeline Processing",
"Tools",
"Visualization",
"Graphical User Interfaces",
"Solid Modeling",
"Pipelines",
"Open DX",
"Pipeline",
"Systems",
"Training",
"Visualisation"
],
"authors": [
{
"affiliation": null,
"fullName": "Anna Perduta",
"givenName": "Anna",
"surname": "Perduta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Roman Putanowicz",
"givenName": "Roman",
"surname": "Putanowicz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cse-euc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-08-01T00:00:00",
"pubType": "proceedings",
"pages": "687-693",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3593-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07982323",
"articleId": "17D45W2WyzC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07982325",
"articleId": "17D45W2Wyyf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2005/2766/0/01532788",
"title": "VisTrails: enabling interactive multiple-view visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532788/12OmNCdBDTX",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410468",
"title": "Adaptive visualization pipeline decomposition and mapping onto computer networks",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410468/12OmNwkzumu",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2015/7568/0/7568a228",
"title": "A Concurrent Architecture Proposal for Information Visualization Pipeline",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a228/12OmNxZ2Glk",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/coginf/2010/8042/0/05599694",
"title": "Application of a tool for ontology visualization",
"doi": null,
"abstractUrl": "/proceedings-article/coginf/2010/05599694/12OmNzZWbRF",
"parentPublication": {
"id": "proceedings/coginf/2010/8042/0",
"title": "2010 9th IEEE International Conference on Cognitive Informatics (ICCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017635",
"title": "Visualization Multi-Pipeline for Communicating Biology",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017635/13rRUILtJmf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/08/07552504",
"title": "Visualization System Requirements for Data Processing Pipeline Design and Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2017/08/07552504/13rRUxd2aZ6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2008/01/ttc2008010055",
"title": "Self-Adaptive Configuration of Visualization Pipeline Over Wide-Area Networks",
"doi": null,
"abstractUrl": "/journal/tc/2008/01/ttc2008010055/13rRUxjQyur",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/06/ttg2008061683",
"title": "Relation-Aware Volume Exploration Pipeline",
"doi": null,
"abstractUrl": "/journal/tg/2008/06/ttg2008061683/13rRUygT7sw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669719",
"title": "A Fast-Processing Pipeline for Three-dimensional Visualization of Acute Ischemic Stroke lesion topography",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669719/1A9VVWEHKeY",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2022/9156/0/09966395",
"title": "A Prototype for Pipeline-Composable Task-Based Visualization Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2022/09966395/1IT0CzlpPHy",
"parentPublication": {
"id": "proceedings/ldav/2022/9156/0",
"title": "2022 IEEE 12th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrMHOdk",
"title": "Twenty-Third Annual Hawaii International Conference on System Sciences",
"acronym": "hicss",
"groupId": "1000730",
"volume": "1",
"displayVolume": "1",
"year": "1990",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqIzgUx",
"doi": "10.1109/HICSS.1990.205117",
"title": "CAPS: a connectionist architecture for production systems",
"normalizedTitle": "CAPS: a connectionist architecture for production systems",
"abstract": "CAPS supports most features of the OPS5 language, including variables, negation, and conjunction and disjunction of conditions. The architecture uses local representations to facilitate dynamic variable bindings and to reduce the number of interconnections within the network, thus making hardware implementations more feasible. The CAPS processing elements are simple and relatively easy to fabricate. A hardware implementation of a CAPS network can potentially provide a 200-800 fold increase in parallelism over serial implementations. The CAPS architecture is tested by transforming small OPS5 programs into connectionist networks and simulating them on a connectionist simulator. It is demonstrated that connectionist architectures can perform rule-based symbolic reasoning and can support dynamic variable bindings.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "CAPS supports most features of the OPS5 language, including variables, negation, and conjunction and disjunction of conditions. The architecture uses local representations to facilitate dynamic variable bindings and to reduce the number of interconnections within the network, thus making hardware implementations more feasible. The CAPS processing elements are simple and relatively easy to fabricate. A hardware implementation of a CAPS network can potentially provide a 200-800 fold increase in parallelism over serial implementations. The CAPS architecture is tested by transforming small OPS5 programs into connectionist networks and simulating them on a connectionist simulator. It is demonstrated that connectionist architectures can perform rule-based symbolic reasoning and can support dynamic variable bindings.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "CAPS supports most features of the OPS5 language, including variables, negation, and conjunction and disjunction of conditions. The architecture uses local representations to facilitate dynamic variable bindings and to reduce the number of interconnections within the network, thus making hardware implementations more feasible. The CAPS processing elements are simple and relatively easy to fabricate. A hardware implementation of a CAPS network can potentially provide a 200-800 fold increase in parallelism over serial implementations. The CAPS architecture is tested by transforming small OPS5 programs into connectionist networks and simulating them on a connectionist simulator. It is demonstrated that connectionist architectures can perform rule-based symbolic reasoning and can support dynamic variable bindings.",
"fno": "00205117",
"keywords": [
"Knowledge Engineering",
"Neural Nets",
"Connectionist Architecture",
"Production Systems",
"OPS 5 Language",
"Negation",
"Conjunction",
"Disjunction",
"Local Representations",
"Dynamic Variable Bindings",
"CAPS Processing Elements",
"CAPS Network",
"Parallelism",
"CAPS Architecture",
"Connectionist Networks",
"Connectionist Simulator",
"Connectionist Architectures",
"Rule Based Symbolic Reasoning",
"Production Systems",
"Expert Systems",
"Vehicle Dynamics",
"Hardware",
"System Performance",
"Testing",
"Neural Networks",
"Robustness",
"Vehicles",
"Buildings"
],
"authors": [
{
"affiliation": "Dept. of Electr. Eng., Waterloo Univ., Ont., Canada",
"fullName": "A.S. Bhogal",
"givenName": "A.S.",
"surname": "Bhogal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng., Waterloo Univ., Ont., Canada",
"fullName": "R.E. Seviora",
"givenName": "R.E.",
"surname": "Seviora",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng., Waterloo Univ., Ont., Canada",
"fullName": "M.I. Elmasry",
"givenName": "M.I.",
"surname": "Elmasry",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "1990-01-01T00:00:00",
"pubType": "proceedings",
"pages": "202-211 vol.1",
"year": "1990",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00205116",
"articleId": "12OmNANBZuM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00205119",
"articleId": "12OmNyeECCJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icsa/2017/5729/0/5729a211",
"title": "CAPS: Architecture Description of Situational Aware Cyber Physical Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icsa/2017/5729a211/12OmNAle6qK",
"parentPublication": {
"id": "proceedings/icsa/2017/5729/0",
"title": "2017 IEEE International Conference on Software Architecture (ICSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/2/00119369",
"title": "A connectionist classifier architecture applied to image segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00119369/12OmNBziBbb",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/2",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/annes/1993/4260/0/00323039",
"title": "Connectionist expert systems",
"doi": null,
"abstractUrl": "/proceedings-article/annes/1993/00323039/12OmNCbU2PZ",
"parentPublication": {
"id": "proceedings/annes/1993/4260/0",
"title": "1993 First New Zealand International Two-Stream Conference on Artificial Neural Networks and Expert Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tai/1989/1984/0/00065341",
"title": "A family of cuts for production systems",
"doi": null,
"abstractUrl": "/proceedings-article/tai/1989/00065341/12OmNwNwzJ4",
"parentPublication": {
"id": "proceedings/tai/1989/1984/0",
"title": "IEEE International Workshop on Tools for Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tai/1994/6785/0/00346399",
"title": "Constructs for building complex symbolic-connectionist systems",
"doi": null,
"abstractUrl": "/proceedings-article/tai/1994/00346399/12OmNxVlTHw",
"parentPublication": {
"id": "proceedings/tai/1994/6785/0",
"title": "Proceedings Sixth International Conference on Tools with Artificial Intelligence. TAI 94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/caia/1995/7070/0/70700203",
"title": "Predicting the response time of OPS5-style production systems",
"doi": null,
"abstractUrl": "/proceedings-article/caia/1995/70700203/12OmNxwncpc",
"parentPublication": {
"id": "proceedings/caia/1995/7070/0",
"title": "Artificial Intelligence for Applications, Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tai/1992/2905/0/00246374",
"title": "A hybrid/symbolic connectionist production system",
"doi": null,
"abstractUrl": "/proceedings-article/tai/1992/00246374/12OmNyPQ4As",
"parentPublication": {
"id": "proceedings/tai/1992/2905/0",
"title": "TAI '92 - Proceedings Fourth International Conference on Tools with Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2004/12/k1543",
"title": "Self-Stabilizing Real-Time OPS5 Production Systems",
"doi": null,
"abstractUrl": "/journal/tk/2004/12/k1543/13rRUwbs218",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2004/02/k0271",
"title": "A Graph-Based Approach for Timing Analysis and Refinement of OPS5 Knowledge-Based Systems",
"doi": null,
"abstractUrl": "/journal/tk/2004/02/k0271/13rRUxAATgH",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2021/0032/0/09555527",
"title": "Exploring Aspects Regarding Reasoning in Neuro-Symbolic Rules and Connectionist Expert Systems",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2021/09555527/1xxcmZXTaNy",
"parentPublication": {
"id": "proceedings/iisa/2021/0032/0",
"title": "2021 12th International Conference on Information, Intelligence, Systems & Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNz4BdvV",
"title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwFid5T",
"doi": "10.1109/ICMEW.2012.37",
"title": "Cross-Layered Hidden Markov Modeling for Surveillance Event Recognition",
"normalizedTitle": "Cross-Layered Hidden Markov Modeling for Surveillance Event Recognition",
"abstract": "In this paper, a novel Cross-Layered Hidden Markov Model (CLHMM) is proposed for high accuracy and low complexity Surveillance Event Recognition (SER). Unlike existing Layered HMM (LHMM) whose inferences are limited in adjacent layers, cross-layer inferences are designed in CLHMM to strengthen reasoning efficiency and reduce computational complexity. One Common Feature Particle Set (CFPS) is also developed in CLHMM to offer the model an assembly of pixel level observations, expert knowledge and Baum-Welch algorithm are combined to achieve optimized performance in CLHMM learning. Experimental results on typical surveillance test sequences showed that CLHMM outperforms LHMM in terms of accuracy and computational complexity.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, a novel Cross-Layered Hidden Markov Model (CLHMM) is proposed for high accuracy and low complexity Surveillance Event Recognition (SER). Unlike existing Layered HMM (LHMM) whose inferences are limited in adjacent layers, cross-layer inferences are designed in CLHMM to strengthen reasoning efficiency and reduce computational complexity. One Common Feature Particle Set (CFPS) is also developed in CLHMM to offer the model an assembly of pixel level observations, expert knowledge and Baum-Welch algorithm are combined to achieve optimized performance in CLHMM learning. Experimental results on typical surveillance test sequences showed that CLHMM outperforms LHMM in terms of accuracy and computational complexity.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, a novel Cross-Layered Hidden Markov Model (CLHMM) is proposed for high accuracy and low complexity Surveillance Event Recognition (SER). Unlike existing Layered HMM (LHMM) whose inferences are limited in adjacent layers, cross-layer inferences are designed in CLHMM to strengthen reasoning efficiency and reduce computational complexity. One Common Feature Particle Set (CFPS) is also developed in CLHMM to offer the model an assembly of pixel level observations, expert knowledge and Baum-Welch algorithm are combined to achieve optimized performance in CLHMM learning. Experimental results on typical surveillance test sequences showed that CLHMM outperforms LHMM in terms of accuracy and computational complexity.",
"fno": "06266251",
"keywords": [
"Computational Complexity",
"Hidden Markov Models",
"Image Sequences",
"Learning Artificial Intelligence",
"Video Surveillance",
"Cross Layered Hidden Markov Modeling",
"Low Complexity Surveillance Event Recognition",
"SER",
"Cross Layer Inferences",
"Computational Complexity",
"Common Feature Particle Set",
"CFPS",
"Pixel Level Observations",
"Baum Welch Algorithm",
"CLHMM Learning",
"Surveillance Test Sequences",
"Hidden Markov Models",
"Surveillance",
"Accuracy",
"Feature Extraction",
"Robustness",
"Computational Modeling",
"Complexity Theory",
"Surveillance Event Recognition",
"Cross Layered Hidden Markov Model",
"Common Feature Particle Set"
],
"authors": [
{
"affiliation": null,
"fullName": "Chongyang Zhang",
"givenName": "Chongyang",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jingbang Qiu",
"givenName": "Jingbang",
"surname": "Qiu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shibao Zheng",
"givenName": "Shibao",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaokang Yang",
"givenName": "Xiaokang",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-07-01T00:00:00",
"pubType": "proceedings",
"pages": "175-180",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-2027-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06266250",
"articleId": "12OmNwGZNJy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06266252",
"articleId": "12OmNzt0IvQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2014/5118/0/5118c561",
"title": "A Hierarchical Context Model for Event Recognition in Surveillance Video",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c561/12OmNCzsKEx",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460339",
"title": "Driving support by estimating vehicle behavior",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460339/12OmNqBtiCZ",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2014/7394/0/7394a123",
"title": "Fault Diagnosing ECG in Body Sensor Networks Based on Hidden Markov Model",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2014/7394a123/12OmNrNh0LH",
"parentPublication": {
"id": "proceedings/msn/2014/7394/0",
"title": "2014 10th International Conference on Mobile Ad-hoc and Sensor Networks (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icetet/2008/3267/0/3267a641",
"title": "Text-Independent Speaker Identification Using Hidden Markov Models",
"doi": null,
"abstractUrl": "/proceedings-article/icetet/2008/3267a641/12OmNxdm4CF",
"parentPublication": {
"id": "proceedings/icetet/2008/3267/0",
"title": "Emerging Trends in Engineering & Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981803",
"title": "HMM-MIO: An enhanced hidden Markov model for action recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981803/12OmNxveNJ6",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2018/4652/0/465201a238",
"title": "SARRI: A SmArt Rapiro Robot Integrating a Framework for Automatic High-Level Surveillance Event Detection",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2018/465201a238/12OmNyFU77E",
"parentPublication": {
"id": "proceedings/irc/2018/4652/0",
"title": "2018 Second IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iat/2015/9618/2/9618b344",
"title": "A Layered Hidden Markov Model for Predicting Human Trajectories in a Multi-floor Building",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2015/9618b344/12OmNyOq55O",
"parentPublication": {
"id": "proceedings/wi-iat/2015/9618/2",
"title": "2015 IEEE / WIC / ACM International Conference on Web Intelligence and Intelligent Agent Technology (WI-IAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2017/3581/0/3581b478",
"title": "Knowledge Extraction from Source Code Based on Hidden Markov Model: Application to EPICAM",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2017/3581b478/12OmNyfdOOZ",
"parentPublication": {
"id": "proceedings/aiccsa/2017/3581/0",
"title": "2017 IEEE/ACS 14th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2012/4797/0/4797a118",
"title": "Selective Background Adaptation Based Abnormal Acoustic Event Recognition for Audio Surveillance",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2012/4797a118/12OmNzaQoxu",
"parentPublication": {
"id": "proceedings/avss/2012/4797/0",
"title": "2012 IEEE Ninth International Conference on Advanced Video and Signal-Based Surveillance",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313477",
"title": "A Novel Algorithm for Training Hidden Markov Models with Positive and Negative Examples",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313477/1qmfXLorD5C",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNykCcdi",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyv7m1Y",
"doi": "10.1109/CVPRW.2016.194",
"title": "Sequential Face Alignment via Person-Specific Modeling in the Wild",
"normalizedTitle": "Sequential Face Alignment via Person-Specific Modeling in the Wild",
"abstract": "Sequential face alignment, in essence, deals with nonrigid deformation that changes over time. Although numerous methods have been proposed to show impressive success on still images, many of them still suffer from limited performance when it comes to sequential alignment in wild scenarios, e.g., involving large pose/expression variations and partial occlusions. The underlying reason is that they usually perform sequential alignment by independently applying models trained offline in each frame in a tracking-by-detection manner but completely ignoring temporal constraints that become available in sequence. To address this issue, we propose to exploit incremental learning for person-specific alignment. Our approach takes advantage of part-based representation and cascade regression for robust and efficient alignment on each frame. More importantly, it incrementally updates the representation subspace and simultaneously adapts the cascade regressors in parallel using a unified framework. Person-specific modeling is eventually achieved on the fly while the drifting issue is significantly alleviated by erroneous detection using both part and holistic descriptors. Extensive experiments on both controlled and in-the-wild datasets demonstrate the superior performance of our approach compared with the state of the arts in terms of fitting accuracy and efficiency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Sequential face alignment, in essence, deals with nonrigid deformation that changes over time. Although numerous methods have been proposed to show impressive success on still images, many of them still suffer from limited performance when it comes to sequential alignment in wild scenarios, e.g., involving large pose/expression variations and partial occlusions. The underlying reason is that they usually perform sequential alignment by independently applying models trained offline in each frame in a tracking-by-detection manner but completely ignoring temporal constraints that become available in sequence. To address this issue, we propose to exploit incremental learning for person-specific alignment. Our approach takes advantage of part-based representation and cascade regression for robust and efficient alignment on each frame. More importantly, it incrementally updates the representation subspace and simultaneously adapts the cascade regressors in parallel using a unified framework. Person-specific modeling is eventually achieved on the fly while the drifting issue is significantly alleviated by erroneous detection using both part and holistic descriptors. Extensive experiments on both controlled and in-the-wild datasets demonstrate the superior performance of our approach compared with the state of the arts in terms of fitting accuracy and efficiency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Sequential face alignment, in essence, deals with nonrigid deformation that changes over time. Although numerous methods have been proposed to show impressive success on still images, many of them still suffer from limited performance when it comes to sequential alignment in wild scenarios, e.g., involving large pose/expression variations and partial occlusions. The underlying reason is that they usually perform sequential alignment by independently applying models trained offline in each frame in a tracking-by-detection manner but completely ignoring temporal constraints that become available in sequence. To address this issue, we propose to exploit incremental learning for person-specific alignment. Our approach takes advantage of part-based representation and cascade regression for robust and efficient alignment on each frame. More importantly, it incrementally updates the representation subspace and simultaneously adapts the cascade regressors in parallel using a unified framework. Person-specific modeling is eventually achieved on the fly while the drifting issue is significantly alleviated by erroneous detection using both part and holistic descriptors. Extensive experiments on both controlled and in-the-wild datasets demonstrate the superior performance of our approach compared with the state of the arts in terms of fitting accuracy and efficiency.",
"fno": "1437b558",
"keywords": [
"Face Recognition",
"Image Representation",
"Learning Artificial Intelligence",
"Object Detection",
"Object Tracking",
"Regression Analysis",
"Sequential Face Alignment",
"Person Specific Modeling",
"Expression Variation",
"Pose Variation",
"Partial Occlusions",
"Incremental Learning",
"Person Specific Alignment",
"Part Based Representation",
"Cascade Regression",
"Representation Subspace",
"Drifting Issue",
"In The Wild Dataset",
"Controlled Dataset",
"Tracking By Detection",
"Face",
"Shape",
"Adaptation Models",
"Deformable Models",
"Robustness",
"Tensile Stress",
"Computational Modeling"
],
"authors": [
{
"affiliation": "Rutgers Univ., Piscataway, NJ, USA",
"fullName": "Xi Peng",
"givenName": "Xi",
"surname": "Peng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Texas at Arlington, Arlington, TX, USA",
"fullName": "Junzhou Huang",
"givenName": "Junzhou",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Rutgers Univ., Piscataway, NJ, USA",
"fullName": "Dimitris N. Metaxas",
"givenName": "Dimitris N.",
"surname": "Metaxas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1558-1567",
"year": "2016",
"issn": "2160-7516",
"isbn": "978-1-5090-1437-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1437b551",
"articleId": "12OmNwBT1r9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1437b568",
"articleId": "12OmNyQ7FDq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2014/5118/0/5118b859",
"title": "Incremental Face Alignment in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118b859/12OmNBRKwBU",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d880",
"title": "PIEFA: Personalized Incremental and Ensemble Face Alignment",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d880/12OmNBv2Cm1",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118b789",
"title": "RAPS: Robust and Efficient Automatic Construction of Person-Specific Deformable Models",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118b789/12OmNxYL5av",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118b851",
"title": "Gauss-Newton Deformable Part Models for Face Alignment In-the-Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118b851/12OmNzxgHCo",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/11/08094942",
"title": "PD2T: Person-Specific Detection, Deformable Tracking",
"doi": null,
"abstractUrl": "/journal/tp/2018/11/08094942/143fgZv9pNS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c171",
"title": "Learning to Align Sequential Actions in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c171/1H1khxjXUnm",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a035",
"title": "End-to-End Cascade CNN for Simultaneously Face Detection and Alignment",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a035/1ap5yO6FAzu",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300g892",
"title": "DeCaFA: Deep Convolutional Cascade for Face Alignment in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300g892/1hQqqGrLbdS",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800k0675",
"title": "Cross-Modal Cross-Domain Moment Alignment Network for Person Search",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800k0675/1m3ocdPKH96",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdia/2020/2232/0/223200a228",
"title": "Embedding Attribute and Relation Information for Person Entity Alignment",
"doi": null,
"abstractUrl": "/proceedings-article/bigdia/2020/223200a228/1stvw6tcRVK",
"parentPublication": {
"id": "proceedings/bigdia/2020/2232/0",
"title": "2020 6th International Conference on Big Data and Information Analytics (BigDIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx8wTfL",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz61dGx",
"doi": "10.1109/ICPR.2008.4761314",
"title": "Finite element modeling of facial deformation in videos for computing strain pattern",
"normalizedTitle": "Finite element modeling of facial deformation in videos for computing strain pattern",
"abstract": "We present a finite element modeling based approach to compute strain patterns caused by facial deformation during expressions in videos. A sparse motion field computed through a robust optical flow method drives the FE model. While the geometry of the model is generic, the material constants associated with an individualpsilas facial skin are learned at a coarse level sufficient for accurate strain map computation. Experimental results using the computational strategy presented in this paper emphasize the uniqueness and stability of strain maps across adverse data conditions (shadow lighting and face camouflage) making it a promising feature for image analysis tasks that can benefit from such auxiliary information.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a finite element modeling based approach to compute strain patterns caused by facial deformation during expressions in videos. A sparse motion field computed through a robust optical flow method drives the FE model. While the geometry of the model is generic, the material constants associated with an individualpsilas facial skin are learned at a coarse level sufficient for accurate strain map computation. Experimental results using the computational strategy presented in this paper emphasize the uniqueness and stability of strain maps across adverse data conditions (shadow lighting and face camouflage) making it a promising feature for image analysis tasks that can benefit from such auxiliary information.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a finite element modeling based approach to compute strain patterns caused by facial deformation during expressions in videos. A sparse motion field computed through a robust optical flow method drives the FE model. While the geometry of the model is generic, the material constants associated with an individualpsilas facial skin are learned at a coarse level sufficient for accurate strain map computation. Experimental results using the computational strategy presented in this paper emphasize the uniqueness and stability of strain maps across adverse data conditions (shadow lighting and face camouflage) making it a promising feature for image analysis tasks that can benefit from such auxiliary information.",
"fno": "04761314",
"keywords": [
"Face Recognition",
"Finite Element Analysis",
"Image Sequences",
"Video Signal Processing",
"Strain Pattern",
"Finite Element Modeling",
"Facial Deformation",
"Sparse Motion Field",
"Robust Optical Flow Method",
"Strain Map Computation",
"Image Analysis",
"Finite Element Methods",
"Deformable Models",
"Videos",
"Capacitive Sensors",
"Image Motion Analysis",
"Optical Computing",
"Robustness",
"Geometrical Optics",
"Computational Geometry",
"Solid Modeling"
],
"authors": [
{
"affiliation": "Computer Science & Engineering, University of South Florida, USA",
"fullName": "Vasant Manohar",
"givenName": "Vasant",
"surname": "Manohar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science & Engineering, University of South Florida, USA",
"fullName": "Matthew Shreve",
"givenName": "Matthew",
"surname": "Shreve",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science & Engineering, University of South Florida, USA",
"fullName": "Dmitry Goldgof",
"givenName": "Dmitry",
"surname": "Goldgof",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science & Engineering, University of South Florida, USA",
"fullName": "Sudeep Sarkar",
"givenName": "Sudeep",
"surname": "Sarkar",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1051-4651",
"isbn": "978-1-4244-2174-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04761313",
"articleId": "12OmNwqfsXl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04761315",
"articleId": "12OmNCdk2TO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dicta/2005/2467/0/24670023",
"title": "3D Strain Detection of a Support Implant for an Artificial Hip Joint Using Finite Element Method and Genetic Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2005/24670023/12OmNAqCtMj",
"parentPublication": {
"id": "proceedings/dicta/2005/2467/0",
"title": "Digital Image Computing: Techniques and Applications (DICTA'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icic/2010/4047/2/4047b175",
"title": "Finite Element Calculation of Residual Stress and Cold-work Hardening Induced in Inconel 718 by Low Plasticity Burnishing",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2010/4047b175/12OmNCfAPJk",
"parentPublication": {
"id": "proceedings/icic/2010/4047/1",
"title": "2010 Third International Conference on Information and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipps/1994/5602/0/0288255",
"title": "A clustered reduced communication element by element preconditioned conjugate gradient algorithm for finite element computations",
"doi": null,
"abstractUrl": "/proceedings-article/ipps/1994/0288255/12OmNCgrD2p",
"parentPublication": {
"id": "proceedings/ipps/1994/5602/0",
"title": "Parallel Processing Symposium, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2007/2794/0/27940042",
"title": "Facial Strain Pattern as a Soft Forensic Evidence",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2007/27940042/12OmNqNos5l",
"parentPublication": {
"id": "proceedings/wacv/2007/2794/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2017/4868/0/07832239",
"title": "Impact Abdominal Injury Analysis Using a 6-Year-Old Pediatric Occupant Abdomen Finite Element Model",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2017/07832239/12OmNro0I06",
"parentPublication": {
"id": "proceedings/icmtma/2017/4868/0",
"title": "2017 9th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmens/2003/1947/0/19470292",
"title": "Finite Element Simulation and Theoretical Analysis of Microelectromechanical System Relays",
"doi": null,
"abstractUrl": "/proceedings-article/icmens/2003/19470292/12OmNvlxJz0",
"parentPublication": {
"id": "proceedings/icmens/2003/1947/0",
"title": "MEMS, NANO, and Smart Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1993/3870/0/00378169",
"title": "A finite element model for 3D shape reconstruction and nonrigid motion tracking",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1993/00378169/12OmNyaGeGm",
"parentPublication": {
"id": "proceedings/iccv/1993/3870/0",
"title": "1993 (4th) International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2017/4868/0/07832237",
"title": "Development of a Visco-Hyperelastic Constitutive Law for Brain Tissue Based on Finite Element Simulation and Optimization Methodology",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2017/07832237/12OmNzFdt9C",
"parentPublication": {
"id": "proceedings/icmtma/2017/4868/0",
"title": "2017 9th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771451",
"title": "Macro- and micro-expression spotting in long videos using spatio-temporal strain",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771451/12OmNzlD9wB",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeas/2021/9768/0/976800a073",
"title": "Modelling the Deformation Characteristics of Titanium Diaphragm for a Spacecraft Propellant Tank",
"doi": null,
"abstractUrl": "/proceedings-article/icmeas/2021/976800a073/1zuuUSh0fD2",
"parentPublication": {
"id": "proceedings/icmeas/2021/9768/0",
"title": "2021 7th International Conference on Mechanical Engineering and Automation Science (ICMEAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H0LC9fRPBm",
"doi": "10.1109/CVPR52688.2022.01284",
"title": "Differentiable Dynamics for Articulated 3d Human Motion Reconstruction",
"normalizedTitle": "Differentiable Dynamics for Articulated 3d Human Motion Reconstruction",
"abstract": "We introduce DiffPhy, a differentiable physics-based model for articulated 3d human motion reconstruction from video. Applications of physics-based reasoning in human motion analysis have so far been limited, both by the complexity of constructing adequate physical models of articulated human motion, and by the formidable challenges of performing stable and efficient inference with physics in the loop. We jointly address such modeling and inference challenges by proposing an approach that combines a physically plausible body representation with anatomical joint limits, a differentiable physics simulator, and optimization techniques that ensure good performance and robustness to suboptimal local optima. In contrast to several recent methods [39], [42], [55], our approach readily supports full-body contact including interactions with objects in the scene. Most importantly, our model connects end-to-end with images, thus supporting direct gradient-based physics optimization by means of image-based loss functions. We validate the model by demonstrating that it can accurately reconstruct physically plausible 3d human motion from monocular video, both on public benchmarks with available 3d ground-truth, and on videos from the internet.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce DiffPhy, a differentiable physics-based model for articulated 3d human motion reconstruction from video. Applications of physics-based reasoning in human motion analysis have so far been limited, both by the complexity of constructing adequate physical models of articulated human motion, and by the formidable challenges of performing stable and efficient inference with physics in the loop. We jointly address such modeling and inference challenges by proposing an approach that combines a physically plausible body representation with anatomical joint limits, a differentiable physics simulator, and optimization techniques that ensure good performance and robustness to suboptimal local optima. In contrast to several recent methods [39], [42], [55], our approach readily supports full-body contact including interactions with objects in the scene. Most importantly, our model connects end-to-end with images, thus supporting direct gradient-based physics optimization by means of image-based loss functions. We validate the model by demonstrating that it can accurately reconstruct physically plausible 3d human motion from monocular video, both on public benchmarks with available 3d ground-truth, and on videos from the internet.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce DiffPhy, a differentiable physics-based model for articulated 3d human motion reconstruction from video. Applications of physics-based reasoning in human motion analysis have so far been limited, both by the complexity of constructing adequate physical models of articulated human motion, and by the formidable challenges of performing stable and efficient inference with physics in the loop. We jointly address such modeling and inference challenges by proposing an approach that combines a physically plausible body representation with anatomical joint limits, a differentiable physics simulator, and optimization techniques that ensure good performance and robustness to suboptimal local optima. In contrast to several recent methods [39], [42], [55], our approach readily supports full-body contact including interactions with objects in the scene. Most importantly, our model connects end-to-end with images, thus supporting direct gradient-based physics optimization by means of image-based loss functions. We validate the model by demonstrating that it can accurately reconstruct physically plausible 3d human motion from monocular video, both on public benchmarks with available 3d ground-truth, and on videos from the internet.",
"fno": "694600n3180",
"keywords": [
"Image Motion Analysis",
"Image Reconstruction",
"Image Sequences",
"Inference Mechanisms",
"Optimisation",
"Pose Estimation",
"Video Signal Processing",
"Differentiable Dynamics",
"Articulated 3 D",
"Human Motion Reconstruction",
"Differentiable Physics Based Model",
"Physics Based Reasoning",
"Human Motion Analysis",
"Adequate Physical Models",
"Articulated Human Motion",
"Formidable Challenges",
"Stable Inference",
"Modeling Inference Challenges",
"Physically Plausible Body Representation",
"Anatomical Joint Limits",
"Differentiable Physics Simulator",
"Robustness",
"Full Body Contact Including Interactions",
"Direct Gradient Based Physics Optimization",
"Image Based Loss Functions",
"Solid Modeling",
"Computer Vision",
"Three Dimensional Displays",
"Dynamics",
"Robustness",
"Pattern Recognition",
"Motion Analysis"
],
"authors": [
{
"affiliation": "Google Research",
"fullName": "Erik Gärtner",
"givenName": "Erik",
"surname": "Gärtner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google Research",
"fullName": "Mykhaylo Andriluka",
"givenName": "Mykhaylo",
"surname": "Andriluka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google Research",
"fullName": "Erwin Coumans",
"givenName": "Erwin",
"surname": "Coumans",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google Research",
"fullName": "Cristian Sminchisescu",
"givenName": "Cristian",
"surname": "Sminchisescu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "13180-13190",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H0LC69pNpC",
"name": "pcvpr202269460-09879528s1-mm_694600n3180.zip",
"size": "148 kB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879528s1-mm_694600n3180.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600n3169",
"articleId": "1H1njBRGx3y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600n3191",
"articleId": "1H1jh510fDy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460265",
"title": "Camera-less articulated trajectory reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460265/12OmNBOll2F",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2015/9548/0/9548a470",
"title": "Human Motion Analysis and Vision-Based Articulated Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2015/9548a470/12OmNs59JRw",
"parentPublication": {
"id": "proceedings/ichi/2015/9548/0",
"title": "2015 International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d595",
"title": "3D Articulated Shape Segmentation Using Motion Information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d595/12OmNscxj11",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mnrao/1994/6435/0/00346245",
"title": "Determining articulated motion from perspective views: a decomposition approach",
"doi": null,
"abstractUrl": "/proceedings-article/mnrao/1994/00346245/12OmNxR5USS",
"parentPublication": {
"id": "proceedings/mnrao/1994/6435/0",
"title": "Proceedings of 1994 IEEE Workshop on Motion of Non-rigid and Articulated Objects",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1999/0167/0/01670220",
"title": "Recursive Dynamics and Optimal Control Techniques for Human Motion Planning",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1999/01670220/12OmNxvNZUe",
"parentPublication": {
"id": "proceedings/ca/1999/0167/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/05/ttp2008050865",
"title": "A Factorization-Based Approach for Articulated Nonrigid Shape, Motion and Kinematic Chain Recovery From Video",
"doi": null,
"abstractUrl": "/journal/tp/2008/05/ttp2008050865/13rRUwcS1E8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2006/04/i0625",
"title": "Motion Analysis of Articulated Objects from Monocular Images",
"doi": null,
"abstractUrl": "/journal/tp/2006/04/i0625/13rRUwvBy9U",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2003/09/i1182",
"title": "Articulated Soft Objects for Multiview Shape and Motion Capture",
"doi": null,
"abstractUrl": "/journal/tp/2003/09/i1182/13rRUxOdD3D",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/04/ttp2011040780",
"title": "Linearized Motion Estimation for Articulated Planes",
"doi": null,
"abstractUrl": "/journal/tp/2011/04/ttp2011040780/13rRUynHukn",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g407",
"title": "Neural MoCon: Neural Motion Control for Physically Plausible Human Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g407/1H1mMfYcmha",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNs0kyru",
"title": "2007 IEEE Virtual Reality Conference",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy5R3CD",
"doi": "10.1109/VR.2007.352475",
"title": "A Personal Surround Environment: Projective Display with Correction for Display Surface Geometry and Extreme Lens Distortion",
"normalizedTitle": "A Personal Surround Environment: Projective Display with Correction for Display Surface Geometry and Extreme Lens Distortion",
"abstract": "Projectors equipped with wide-angle lenses can have an advantage over traditional projectors in creating immersive display environments since they can be placed very close to the display surface to reduce user shadowing issues while still producing large images. However, wide-angle projectors exhibit severe image distortion requiring the image generator to correctively pre-distort the output image. In this paper, we describe a new technique based on Raskar's (1998) two-pass rendering algorithm that is able to correct for both arbitrary display surface geometry and the extreme lens distortion caused by fisheye lenses. We further detail how the distortion correction algorithm can be implemented in a real-time shader program running on a commodity GPU to create low-cost, personal surround environments",
"abstracts": [
{
"abstractType": "Regular",
"content": "Projectors equipped with wide-angle lenses can have an advantage over traditional projectors in creating immersive display environments since they can be placed very close to the display surface to reduce user shadowing issues while still producing large images. However, wide-angle projectors exhibit severe image distortion requiring the image generator to correctively pre-distort the output image. In this paper, we describe a new technique based on Raskar's (1998) two-pass rendering algorithm that is able to correct for both arbitrary display surface geometry and the extreme lens distortion caused by fisheye lenses. We further detail how the distortion correction algorithm can be implemented in a real-time shader program running on a commodity GPU to create low-cost, personal surround environments",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Projectors equipped with wide-angle lenses can have an advantage over traditional projectors in creating immersive display environments since they can be placed very close to the display surface to reduce user shadowing issues while still producing large images. However, wide-angle projectors exhibit severe image distortion requiring the image generator to correctively pre-distort the output image. In this paper, we describe a new technique based on Raskar's (1998) two-pass rendering algorithm that is able to correct for both arbitrary display surface geometry and the extreme lens distortion caused by fisheye lenses. We further detail how the distortion correction algorithm can be implemented in a real-time shader program running on a commodity GPU to create low-cost, personal surround environments",
"fno": "04161017",
"keywords": [
"Distortion",
"Optical Projectors",
"Rendering Computer Graphics",
"Personal Surround Environment",
"Display Surface Geometry",
"Extreme Lens Distortion",
"Immersive Display Environments",
"Rendering",
"Geometry",
"Lenses",
"Rendering Computer Graphics",
"Computer Displays",
"Computer Graphics",
"Three Dimensional Displays",
"Shadow Mapping",
"Image Generation",
"Virtual Reality",
"Shape",
"Projector Displays",
"Lens Distortion Correction",
"GPU Programming",
"I 3 3 Computer Graphics Picture Image Generation Display Algorithms",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality"
],
"authors": [
{
"affiliation": "University of North Carolina, Chapel Hill, tmjohns@cs.unc.edu",
"fullName": "Tyler Johnson",
"givenName": "Tyler",
"surname": "Johnson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina, Chapel Hill, florian@cs.unc.edu",
"fullName": "Florian Gyarfas",
"givenName": "Florian",
"surname": "Gyarfas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina, Chapel Hill, skarbez@cs.unc.edu",
"fullName": "Rick Skarbez",
"givenName": "Rick",
"surname": "Skarbez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina, Chapel Hill, towles@cs.unc.edu",
"fullName": "Herman Towles",
"givenName": "Herman",
"surname": "Towles",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina, Chapel Hill, fuchs@cs.unc.edu",
"fullName": "Henry Fuchs",
"givenName": "Henry",
"surname": "Fuchs",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-03-01T00:00:00",
"pubType": "proceedings",
"pages": "147-154",
"year": "2007",
"issn": "1087-8270",
"isbn": "1-4244-0905-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04161016",
"articleId": "12OmNBQkx2K",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04161003",
"articleId": "12OmNwsNRa8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icig/2004/2244/0/01410480",
"title": "A survey of multi-projector tiled display wall construction",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410480/12OmNAWH9up",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480768",
"title": "Automultiscopic display by revolving flat-panel displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480768/12OmNAolGTH",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981693",
"title": "Prototyping a light field display involving direct observation of a video projector array",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981693/12OmNB1wkNJ",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04285089",
"title": "A High Resolution Video Display System by Seamlessly Tiling Multiple Projectors",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04285089/12OmNB9bvdW",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2006/0224/0/01667679",
"title": "Long Visualization Depth Autostereoscopic Display using Light Field Rendering based Integral Videography",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/01667679/12OmNvDZEZe",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532818",
"title": "The magic volume lens: an interactive focus+context technique for volume rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532818/12OmNyuyade",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2007/3056/0/30560079",
"title": "Room-sized Immersive Projection Display for Tele-immersion Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2007/30560079/12OmNzUxOgY",
"parentPublication": {
"id": "proceedings/icat/2007/3056/0",
"title": "17th International Conference on Artificial Reality and Telexistence (ICAT 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446433",
"title": "A Calibration Method for Large-Scale Projection Based Floor Display System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446433/13bd1gJ1v0M",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/v1368",
"title": "Registration Techniques for Using Imperfect and Partially Calibrated Devices in Planar Multi-Projector Displays",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/v1368/13rRUwInvyp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900d688",
"title": "Evaluating the Impact of Wide-Angle Lens Distortion on Learning-based Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900d688/1wzs0Hn2dUY",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwl8GHU",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzEVRZL",
"doi": "10.1109/3DUI.2013.6550218",
"title": "Poster: Portable integral photography input/output system using tablet PC and fly's eye lenses",
"normalizedTitle": "Poster: Portable integral photography input/output system using tablet PC and fly's eye lenses",
"abstract": "We present a new system that can input and output integral photography images. The system consists of a commercially-available tablet PC to which fly's eye lenses, one of which is for input and the other for output IP images, are attached. Light rays emitted from real objects are condensed by first and second convex lens and transferred to a fly's eye lens that consists of n × n small convex lenses, each of which corresponds to a viewpoint. The resulting n × n images are taken with the camera on the tablet PC and used to synthesize an IP image with our original software, which is displayed on the screen of the tablet PC and observed through the fly's eye lens for output projected onto the screen. We found through an experiment that excellent stereoscopic images could be observed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a new system that can input and output integral photography images. The system consists of a commercially-available tablet PC to which fly's eye lenses, one of which is for input and the other for output IP images, are attached. Light rays emitted from real objects are condensed by first and second convex lens and transferred to a fly's eye lens that consists of n × n small convex lenses, each of which corresponds to a viewpoint. The resulting n × n images are taken with the camera on the tablet PC and used to synthesize an IP image with our original software, which is displayed on the screen of the tablet PC and observed through the fly's eye lens for output projected onto the screen. We found through an experiment that excellent stereoscopic images could be observed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a new system that can input and output integral photography images. The system consists of a commercially-available tablet PC to which fly's eye lenses, one of which is for input and the other for output IP images, are attached. Light rays emitted from real objects are condensed by first and second convex lens and transferred to a fly's eye lens that consists of n × n small convex lenses, each of which corresponds to a viewpoint. The resulting n × n images are taken with the camera on the tablet PC and used to synthesize an IP image with our original software, which is displayed on the screen of the tablet PC and observed through the fly's eye lens for output projected onto the screen. We found through an experiment that excellent stereoscopic images could be observed.",
"fno": "06550218",
"keywords": [
"Lenses",
"IP Networks",
"Cameras",
"Three Dimensional Displays",
"Photography",
"Arrays",
"Autostereoscopic Display",
"Integral Photography",
"Flys Eye Lens"
],
"authors": [
{
"affiliation": "Kanagawa Inst. of Technol., Atsugi, Japan",
"fullName": "Yusuke Kawano",
"givenName": "Yusuke",
"surname": "Kawano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kanagawa Inst. of Technol., Atsugi, Japan",
"fullName": "Kazuhisa Yanaka",
"givenName": "Kazuhisa",
"surname": "Yanaka",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-03-01T00:00:00",
"pubType": "proceedings",
"pages": "141-142",
"year": "2013",
"issn": null,
"isbn": "978-1-4673-6097-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06550217",
"articleId": "12OmNxZBSzZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06550219",
"articleId": "12OmNCdBDJz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2005/9077/0/01611911",
"title": "Using a Tablet PC for Classroom Instruction",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2005/01611911/12OmNAQrYG8",
"parentPublication": {
"id": "proceedings/fie/2005/9077/0",
"title": "35th Annual Frontiers in Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2007/1083/0/04417887",
"title": "Teaching dynamics using interactive tablet PC instruction software",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2007/04417887/12OmNAqU4Tp",
"parentPublication": {
"id": "proceedings/fie/2007/1083/0",
"title": "2007 37th Annual Frontiers in Education Conference - Global Engineering: Knowledge Without Borders, Opportunities Without Passports",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/2/00413669",
"title": "Depth estimation using stereo fish-eye lenses",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413669/12OmNBfqG59",
"parentPublication": {
"id": "proceedings/icip/1994/6952/2",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549419",
"title": "Real-time rendering of extended fractional view integral photography",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549419/12OmNC8dgoG",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223436",
"title": "Underwater integral photography",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223436/12OmNCm7BLE",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2011/468/0/06142773",
"title": "Tablet PC use in freshman mathematics classes promotes STEM retention",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2011/06142773/12OmNylKAZo",
"parentPublication": {
"id": "proceedings/fie/2011/468/0",
"title": "2011 Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2016/0987/0/0987a569",
"title": "Flexible Screen Sharing System between PC and Tablet for Collaborative Activities",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2016/0987a569/12OmNyz5JP7",
"parentPublication": {
"id": "proceedings/cisis/2016/0987/0",
"title": "2016 10th International Conference on Complex, Intelligent, and Software Intensive Systems (CISIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2007/09/r9049",
"title": "Handwriting Recognition: Tablet PC Text Input",
"doi": null,
"abstractUrl": "/magazine/co/2007/09/r9049/13rRUxBJhB6",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2008/02/tlt2008020105",
"title": "Tablet PC Technology for the Enhancement of Synchronous Distributed Education",
"doi": null,
"abstractUrl": "/journal/lt/2008/02/tlt2008020105/13rRUxCitFH",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798107",
"title": "Gaze-Dependent Distortion Correction for Thick Lenses in HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798107/1cJ12M9tKM0",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNylborE",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAWpyow",
"doi": "10.1109/WACV.2018.00158",
"title": "Stabilizing First Person 360 Degree Videos",
"normalizedTitle": "Stabilizing First Person 360 Degree Videos",
"abstract": "The use of 360 degree cameras, enabling one to record and share full-spherical 360° X 180° view without any cropping in the viewing angle, is on the rise. Shake in such videos is problematic, especially when used in conjunction with VR headsets causing cybersickness to the viewer. The current state-of-the-art video stabilization algorithm [17] designed specifically for 360 degree videos considers the special geometrical constraints in such videos. However, the specific steps in the algorithm can abruptly change the viewing direction in a video leading to unnatural experience for the viewer. In this paper, we propose to fix this anomaly by the use of L1 smoothness constraints on the camera path, as suggested by Grundmann et al. [7]. The modified algorithm is generic and our experiments indicate that the proposed algorithm not only gives a more natural and smoother stabilization for 360 degree videos but can be used for stabilizing normal field of view videos as well.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The use of 360 degree cameras, enabling one to record and share full-spherical 360° X 180° view without any cropping in the viewing angle, is on the rise. Shake in such videos is problematic, especially when used in conjunction with VR headsets causing cybersickness to the viewer. The current state-of-the-art video stabilization algorithm [17] designed specifically for 360 degree videos considers the special geometrical constraints in such videos. However, the specific steps in the algorithm can abruptly change the viewing direction in a video leading to unnatural experience for the viewer. In this paper, we propose to fix this anomaly by the use of L1 smoothness constraints on the camera path, as suggested by Grundmann et al. [7]. The modified algorithm is generic and our experiments indicate that the proposed algorithm not only gives a more natural and smoother stabilization for 360 degree videos but can be used for stabilizing normal field of view videos as well.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The use of 360 degree cameras, enabling one to record and share full-spherical 360° X 180° view without any cropping in the viewing angle, is on the rise. Shake in such videos is problematic, especially when used in conjunction with VR headsets causing cybersickness to the viewer. The current state-of-the-art video stabilization algorithm [17] designed specifically for 360 degree videos considers the special geometrical constraints in such videos. However, the specific steps in the algorithm can abruptly change the viewing direction in a video leading to unnatural experience for the viewer. In this paper, we propose to fix this anomaly by the use of L1 smoothness constraints on the camera path, as suggested by Grundmann et al. [7]. The modified algorithm is generic and our experiments indicate that the proposed algorithm not only gives a more natural and smoother stabilization for 360 degree videos but can be used for stabilizing normal field of view videos as well.",
"fno": "488601b405",
"keywords": [
"User Interfaces",
"Video Cameras",
"Video Signal Processing",
"Virtual Reality",
"360 Degree Cameras",
"Viewing Angle",
"Viewer",
"Viewing Direction",
"Natural Stabilization",
"Smoother Stabilization",
"View Videos",
"Video Stabilization Algorithm",
"First Person 360 Degree Videos",
"VR Headsets",
"Videos",
"Cameras",
"Three Dimensional Displays",
"Face",
"Trajectory",
"Optimization",
"Headphones"
],
"authors": [
{
"affiliation": null,
"fullName": "Chetan Arora",
"givenName": "Chetan",
"surname": "Arora",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Vivek Kwatra",
"givenName": "Vivek",
"surname": "Kwatra",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1405-1413",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-4886-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "488601b397",
"articleId": "12OmNzlly4u",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "488601b414",
"articleId": "12OmNBqv2qI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892229",
"title": "6-DOF VR videos with a single 360-camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892229/12OmNAlvHtF",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2016/9005/0/07840720",
"title": "Shooting a moving target: Motion-prediction-based transmission for 360-degree videos",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2016/07840720/12OmNvjgWtu",
"parentPublication": {
"id": "proceedings/big-data/2016/9005/0",
"title": "2016 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2017/2937/0/2937a274",
"title": "Robust and Fast Object Tracking for Challenging 360-degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a274/12OmNwE9ONa",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457b396",
"title": "Deep 360 Pilot: Learning a Deep Agent for Piloting through 360° Sports Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b396/12OmNwKGAlL",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486537",
"title": "A Subjective Study of Viewer Navigation Behaviors When Watching 360-Degree Videos on Computers",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486537/14jQfTvagGm",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lcn/2018/4413/0/08638092",
"title": "Plato: Learning-based Adaptive Streaming of 360-Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2018/08638092/18rqIpj1b3i",
"parentPublication": {
"id": "proceedings/lcn/2018/4413/0",
"title": "2018 IEEE 43rd Conference on Local Computer Networks (LCN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2022/6390/0/09904420",
"title": "Subtitle-based Viewport Prediction for 360-degree Virtual Tourism Video",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2022/09904420/1H5KpY37ODe",
"parentPublication": {
"id": "proceedings/iisa/2022/6390/0",
"title": "2022 13th International Conference on Information, Intelligence, Systems & Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a417",
"title": "Behavioural Biometrics in Virtual Reality: To What Extent Can We Identify a Person Based Solely on How They Watch 360-Degree Videos?",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a417/1J7WdzjtFG8",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/searis/2018/6272/0/09180230",
"title": "Lightweight Visualization and User Logging for Mobile 360-degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/searis/2018/09180230/1mK7jepaiRy",
"parentPublication": {
"id": "proceedings/searis/2018/6272/0",
"title": "2018 IEEE 11th Workshop on Software Engineering and Architectures for Real-time Interactive Systems (SEARIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a082",
"title": "Redefine the A in ABR for 360-degree Videos: A Flexible ABR Framework",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a082/1qBbIEON8UU",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNznkJU6",
"doi": "10.1109/VR.2017.7892377",
"title": "WebVR meets WebRTC: Towards 360-degree social VR experiences",
"normalizedTitle": "WebVR meets WebRTC: Towards 360-degree social VR experiences",
"abstract": "Virtual Reality (VR) and 360-degree video are reshaping the media landscape, creating a fertile business environment. During 2016 new 360-degree cameras and VR headsets entered the consumer market, distribution platforms are being established and new production studios are emerging. VR is evermore becoming a hot topic in research and industry and many new and exciting interactive VR content and experiences are emerging. The biggest gap we see in these experiences are social and shared aspects of VR. In this demo we present our ongoing efforts towards social and shared VR by developing a modular web based VR framework, that extends current video conferencing capabilities with new functionalities of Virtual and Mixed Reality. It allows us to connect two people together for mediated audio-visual interaction, while being able to engage in interactive content. Our framework allows to run extensive technological and user based trials in order to evaluate VR experiences and to build immersive multi-user interaction spaces. Our first results indicate that a high level of engagement and interaction between users is possible in our 360-degree VR set-up utilizing current web technologies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality (VR) and 360-degree video are reshaping the media landscape, creating a fertile business environment. During 2016 new 360-degree cameras and VR headsets entered the consumer market, distribution platforms are being established and new production studios are emerging. VR is evermore becoming a hot topic in research and industry and many new and exciting interactive VR content and experiences are emerging. The biggest gap we see in these experiences are social and shared aspects of VR. In this demo we present our ongoing efforts towards social and shared VR by developing a modular web based VR framework, that extends current video conferencing capabilities with new functionalities of Virtual and Mixed Reality. It allows us to connect two people together for mediated audio-visual interaction, while being able to engage in interactive content. Our framework allows to run extensive technological and user based trials in order to evaluate VR experiences and to build immersive multi-user interaction spaces. Our first results indicate that a high level of engagement and interaction between users is possible in our 360-degree VR set-up utilizing current web technologies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality (VR) and 360-degree video are reshaping the media landscape, creating a fertile business environment. During 2016 new 360-degree cameras and VR headsets entered the consumer market, distribution platforms are being established and new production studios are emerging. VR is evermore becoming a hot topic in research and industry and many new and exciting interactive VR content and experiences are emerging. The biggest gap we see in these experiences are social and shared aspects of VR. In this demo we present our ongoing efforts towards social and shared VR by developing a modular web based VR framework, that extends current video conferencing capabilities with new functionalities of Virtual and Mixed Reality. It allows us to connect two people together for mediated audio-visual interaction, while being able to engage in interactive content. Our framework allows to run extensive technological and user based trials in order to evaluate VR experiences and to build immersive multi-user interaction spaces. Our first results indicate that a high level of engagement and interaction between users is possible in our 360-degree VR set-up utilizing current web technologies.",
"fno": "07892377",
"keywords": [
"Games",
"Web RTC",
"Virtual Environments",
"Green Products",
"Cameras",
"Three Dimensional Displays",
"Virtual Reality",
"VR",
"Social VR",
"Web RTC",
"Web VR",
"Interactive Content",
"Immersive Virtual Environments"
],
"authors": [
{
"affiliation": "TNO, The Netherlands",
"fullName": "Simon Gunkel",
"givenName": "Simon",
"surname": "Gunkel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TNO, The Netherlands",
"fullName": "Martin Prins",
"givenName": "Martin",
"surname": "Prins",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TNO, The Netherlands",
"fullName": "Hans Stokking",
"givenName": "Hans",
"surname": "Stokking",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TNO, The Netherlands",
"fullName": "Omar Niamut",
"givenName": "Omar",
"surname": "Niamut",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "457-458",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892376",
"articleId": "12OmNC2OSOD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892378",
"articleId": "12OmNzkuKKg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2016/4571/0/4571a107",
"title": "Adaptive 360 VR Video Streaming: Divide and Conquer",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a107/12OmNAMtAMS",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601b405",
"title": "Stabilizing First Person 360 Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b405/12OmNAWpyow",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892229",
"title": "6-DOF VR videos with a single 360-camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892229/12OmNAlvHtF",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iucc-css/2016/5566/0/5566a145",
"title": "Engaging Immersive Video Consumers: Challenges Regarding 360-Degree Gamified Video Applications",
"doi": null,
"abstractUrl": "/proceedings-article/iucc-css/2016/5566a145/12OmNyUFfI4",
"parentPublication": {
"id": "proceedings/iucc-css/2016/5566/0",
"title": "2016 15th International Conference on Ubiquitous Computing and Communications and 2016 International Symposium on Cyberspace and Security (IUCC-CSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486537",
"title": "A Subjective Study of Viewer Navigation Behaviors When Watching 360-Degree Videos on Computers",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486537/14jQfTvagGm",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a023",
"title": "View-Adaptive Asymmetric Image Detail Enhancement for 360-degree Stereoscopic VR Content",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a023/1CJcBKE82SA",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798261",
"title": "Hybrid Projection For Encoding 360 VR Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798261/1cJ0Wb1xK4E",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797971",
"title": "360-Degree Photo-realistic VR Conferencing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797971/1cJ1b26beEg",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090548",
"title": "Visual Guidance Methods in Immersive and Interactive VR Environments with Connected 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090548/1jIxsIo58PK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/04/09384236",
"title": "The Potential of 360° Virtual Reality Videos and Real VR for Education—A Literature Review",
"doi": null,
"abstractUrl": "/magazine/cg/2021/04/09384236/1scDA5NYISI",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "19wB16JGcSY",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"acronym": "mipr",
"groupId": "1825825",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "19wB4BA5bEI",
"doi": "10.1109/MIPR.2019.00011",
"title": "FDDB-360: Face Detection in 360-Degree Fisheye Images",
"normalizedTitle": "FDDB-360: Face Detection in 360-Degree Fisheye Images",
"abstract": "360° cameras offer the possibility to cover a large area, for example an entire room, without using multiple distributed vision sensors. However, geometric distortions introduced by their lenses make computer vision problems more challenging. In this paper we address face detection in 360° fisheye images. We show how a face detector trained on regular images can be re-trained for this purpose, and we also provide a 360° fisheye-like version of the popular FDDB face detection dataset, which we call FDDB-360.",
"abstracts": [
{
"abstractType": "Regular",
"content": "360° cameras offer the possibility to cover a large area, for example an entire room, without using multiple distributed vision sensors. However, geometric distortions introduced by their lenses make computer vision problems more challenging. In this paper we address face detection in 360° fisheye images. We show how a face detector trained on regular images can be re-trained for this purpose, and we also provide a 360° fisheye-like version of the popular FDDB face detection dataset, which we call FDDB-360.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "360° cameras offer the possibility to cover a large area, for example an entire room, without using multiple distributed vision sensors. However, geometric distortions introduced by their lenses make computer vision problems more challenging. In this paper we address face detection in 360° fisheye images. We show how a face detector trained on regular images can be re-trained for this purpose, and we also provide a 360° fisheye-like version of the popular FDDB face detection dataset, which we call FDDB-360.",
"fno": "119800a015",
"keywords": [
"Face",
"Distortion",
"Face Detection",
"Detectors",
"Training",
"Two Dimensional Displays",
"Cameras",
"Face Detection",
"Deep Learning",
"360 X 00 B 0 Images",
"FDDB 360 Dataset"
],
"authors": [
{
"affiliation": null,
"fullName": "Jianglin Fu",
"givenName": "Jianglin",
"surname": "Fu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Saeed Ranjbar Alvar",
"givenName": "Saeed",
"surname": "Ranjbar Alvar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ivan Bajic",
"givenName": "Ivan",
"surname": "Bajic",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rodney Vaughan",
"givenName": "Rodney",
"surname": "Vaughan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mipr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "15-19",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1198-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "119800a009",
"articleId": "19wB2ctz7ry",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "119800a020",
"articleId": "19wB4I0C0hi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dcabes/2011/4415/0/4415a187",
"title": "Generation of Panoramic View from 360 Degree Fisheye Images Based on Angular Fisheye Projection",
"doi": null,
"abstractUrl": "/proceedings-article/dcabes/2011/4415a187/12OmNALCNsK",
"parentPublication": {
"id": "proceedings/dcabes/2011/4415/0",
"title": "2011 10th International Symposium on Distributed Computing and Applications to Business, Engineering and Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859817",
"title": "Omni-NeRF: Neural Radiance Field from 360° Image Captures",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859817/1G9DIJAkSzK",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2018/6956/0/695600a187",
"title": "Distortion Correction of Building-Fisheye-Images",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2018/695600a187/1dUo3WXGjbG",
"parentPublication": {
"id": "proceedings/icnisc/2018/6956/0",
"title": "2018 4th Annual International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093262",
"title": "360-Indoor: Towards Learning Real-World Objects in 360° Indoor Equirectangular Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093262/1jPbAWPyE8g",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093452",
"title": "Visual Question Answering on 360° Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093452/1jPbCyCHgkw",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpai/2020/4262/0/426200a194",
"title": "360 Degree Fish Eye Optical Construction For Equirectangular Projection of Panoramic Images",
"doi": null,
"abstractUrl": "/proceedings-article/icpai/2020/426200a194/1pZ189zqnN6",
"parentPublication": {
"id": "proceedings/icpai/2020/4262/0",
"title": "2020 International Conference on Pervasive Artificial Intelligence (ICPAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382903",
"title": "A Log-Rectilinear Transformation for Foveated 360-degree Video Streaming",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382903/1saZxiH9uaQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100b174",
"title": "Deep Single Fisheye Image Camera Calibration for Over 180-degree Projection of Field of View",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100b174/1yNinwg4Lvy",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1418",
"title": "Real-Time Sphere Sweeping Stereo from Multiview Fisheye Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1418/1yeHJLiupiM",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2021/2744/0/09631545",
"title": "Improved Face Detector on Fisheye Images via Spherical-Domain Attention",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2021/09631545/1zmvL3l1TVK",
"parentPublication": {
"id": "proceedings/iscc/2021/2744/0",
"title": "2021 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0QeOY360",
"doi": "10.1109/VR.2019.8797969",
"title": "Automatic Generation of Interactive 3D Characters and Scenes for Virtual Reality from a Single-Viewpoint 360-Degree Video",
"normalizedTitle": "Automatic Generation of Interactive 3D Characters and Scenes for Virtual Reality from a Single-Viewpoint 360-Degree Video",
"abstract": "This work addresses the problem of using real-world data captured from a single viewpoint by a low-cost 360-degree camera to create an immersive and interactive virtual reality scene. We combine different existing state-of-the-art data enhancement methods based on pre-trained deep learning models to quickly and automatically obtain 3D scenes with animated character models from a 360-degree video. We provide details on our implementation and insight on how to adapt existing methods to 360-degree inputs. We also present the results of a user study assessing the extent to which virtual agents generated by this process are perceived as present and engaging.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This work addresses the problem of using real-world data captured from a single viewpoint by a low-cost 360-degree camera to create an immersive and interactive virtual reality scene. We combine different existing state-of-the-art data enhancement methods based on pre-trained deep learning models to quickly and automatically obtain 3D scenes with animated character models from a 360-degree video. We provide details on our implementation and insight on how to adapt existing methods to 360-degree inputs. We also present the results of a user study assessing the extent to which virtual agents generated by this process are perceived as present and engaging.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This work addresses the problem of using real-world data captured from a single viewpoint by a low-cost 360-degree camera to create an immersive and interactive virtual reality scene. We combine different existing state-of-the-art data enhancement methods based on pre-trained deep learning models to quickly and automatically obtain 3D scenes with animated character models from a 360-degree video. We provide details on our implementation and insight on how to adapt existing methods to 360-degree inputs. We also present the results of a user study assessing the extent to which virtual agents generated by this process are perceived as present and engaging.",
"fno": "08797969",
"keywords": [
"Computer Animation",
"Learning Artificial Intelligence",
"Neural Nets",
"Virtual Reality",
"Single Viewpoint 360 Degree Video",
"360 Degree Camera",
"Immersive Reality Scene",
"Interactive Virtual Reality Scene",
"Pre Trained Deep Learning Models",
"Animated Character Models",
"360 Degree Inputs",
"Virtual Agents",
"Automatic Generation",
"Interactive 3 D Characters",
"Data Enhancement",
"Three Dimensional Displays",
"Solid Modeling",
"Adaptation Models",
"Virtual Reality",
"Cameras",
"Shape",
"Pose Estimation",
"Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Virtual Reality",
"Computing Methodologies X 2014 Computer Graphics X 2014 Animation X 2014 Motion Capture"
],
"authors": [
{
"affiliation": "Centre for Robotics, MINES ParisTech, PSL Research University",
"fullName": "Gregoire Dupont de Dinechin",
"givenName": "Gregoire Dupont",
"surname": "de Dinechin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Centre for Robotics, MINES ParisTech, PSL Research University",
"fullName": "Alexis Paljic",
"givenName": "Alexis",
"surname": "Paljic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "908-909",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798073",
"articleId": "1cJ1cEQE120",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798006",
"articleId": "1cJ1ej1Xf7W",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2018/4886/0/488601b405",
"title": "Stabilizing First Person 360 Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b405/12OmNAWpyow",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iucc-css/2016/5566/0/5566a145",
"title": "Engaging Immersive Video Consumers: Challenges Regarding 360-Degree Gamified Video Applications",
"doi": null,
"abstractUrl": "/proceedings-article/iucc-css/2016/5566a145/12OmNyUFfI4",
"parentPublication": {
"id": "proceedings/iucc-css/2016/5566/0",
"title": "2016 15th International Conference on Ubiquitous Computing and Communications and 2016 International Symposium on Cyberspace and Security (IUCC-CSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892319",
"title": "Adaptive 360-degree video streaming using layered video coding",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892319/12OmNyv7mbV",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2021/3734/0/373400a001",
"title": "360ViewPET: View Based Pose EsTimation for Ultra-Sparse 360-Degree Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2021/373400a001/1A3ja9gobvO",
"parentPublication": {
"id": "proceedings/ism/2021/3734/0",
"title": "2021 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2022/9548/0/954800a281",
"title": "Ultra-Sparse 360-Degree Camera View Synthesis for Immersive Virtual Tourism",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2022/954800a281/1Gvddeocuqs",
"parentPublication": {
"id": "proceedings/mipr/2022/9548/0",
"title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a018",
"title": "Real-Time Object Detection for 360-Degree Panoramic Image Using CNN",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a018/1ap5AIwwpUc",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a400",
"title": "The SJTU UHD 360-Degree Immersive Video Sequence Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a400/1ap5CKgxrm8",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ijcime/2019/5586/0/558600a288",
"title": "The Effect of 360-Degree Video Authentic Materials on EFL Learners' Listening Comprehension",
"doi": null,
"abstractUrl": "/proceedings-article/ijcime/2019/558600a288/1j9wyWbeGsg",
"parentPublication": {
"id": "proceedings/ijcime/2019/5586/0",
"title": "2019 International Joint Conference on Information, Media and Engineering (IJCIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2020/1485/0/09106031",
"title": "The Hyper360 Toolset For Enriched 360° Video",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2020/09106031/1kwqzpVU3za",
"parentPublication": {
"id": "proceedings/icmew/2020/1485/0",
"title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2021/4989/0/09456008",
"title": "Visual Saliency Prediction on 360 Degree Images With CNN",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2021/09456008/1uCgpAC9kZi",
"parentPublication": {
"id": "proceedings/icmew/2021/4989/0",
"title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1kwqNHC4Fy0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1kwqZY9pI7m",
"doi": "10.1109/ICME46284.2020.9102936",
"title": "A Viewport-Driven Multi-Metric Fusion Approach for 360-Degree Video Quality Assessment",
"normalizedTitle": "A Viewport-Driven Multi-Metric Fusion Approach for 360-Degree Video Quality Assessment",
"abstract": "We propose a new viewport-based multi-metric fusion (MMF) approach for visual quality assessment of 360-degree (omnidirectional) videos. Our method is based on computing multiple spatio-temporal objective quality metrics (features) on viewports extracted from 360-degree videos, and learning a model that combines these features into a metric, which closely matches subjective quality scores. The main motivations for the proposed method are that: 1) quality metrics computed on viewports better captures the user experience than metrics computed on the projection domain; 2) no individual objective image quality metric always performs best for all types of visual distortions, while a learned combination of them is able to adapt to different conditions and produce better results overall. Experimental results, based on the largest available 360-degree videos quality dataset, demonstrate that the proposed metric outperforms state-of-the-art 360-degree and 2D video quality metrics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a new viewport-based multi-metric fusion (MMF) approach for visual quality assessment of 360-degree (omnidirectional) videos. Our method is based on computing multiple spatio-temporal objective quality metrics (features) on viewports extracted from 360-degree videos, and learning a model that combines these features into a metric, which closely matches subjective quality scores. The main motivations for the proposed method are that: 1) quality metrics computed on viewports better captures the user experience than metrics computed on the projection domain; 2) no individual objective image quality metric always performs best for all types of visual distortions, while a learned combination of them is able to adapt to different conditions and produce better results overall. Experimental results, based on the largest available 360-degree videos quality dataset, demonstrate that the proposed metric outperforms state-of-the-art 360-degree and 2D video quality metrics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a new viewport-based multi-metric fusion (MMF) approach for visual quality assessment of 360-degree (omnidirectional) videos. Our method is based on computing multiple spatio-temporal objective quality metrics (features) on viewports extracted from 360-degree videos, and learning a model that combines these features into a metric, which closely matches subjective quality scores. The main motivations for the proposed method are that: 1) quality metrics computed on viewports better captures the user experience than metrics computed on the projection domain; 2) no individual objective image quality metric always performs best for all types of visual distortions, while a learned combination of them is able to adapt to different conditions and produce better results overall. Experimental results, based on the largest available 360-degree videos quality dataset, demonstrate that the proposed metric outperforms state-of-the-art 360-degree and 2D video quality metrics.",
"fno": "09102936",
"keywords": [
"Image Fusion",
"Learning Artificial Intelligence",
"Statistical Analysis",
"Video Coding",
"Video Signal Processing",
"Viewport Based Multimetric Fusion Approach",
"Visual Quality Assessment",
"Multiple Spatio Temporal",
"Viewports",
"Subjective Quality Scores",
"Individual Objective Image Quality Metric",
"Learned Combination",
"360 Degree Videos Quality Dataset",
"Metric Outperforms State Of The Art 360 Degree",
"2 D Video Quality Metrics",
"Viewport Driven Multimetric Fusion Approach",
"360 Degree Video Quality Assessment",
"Measurement",
"Quality Assessment",
"Visualization",
"Video Recording",
"Feature Extraction",
"Two Dimensional Displays",
"Distortion",
"Visual Quality Assessment",
"Omnidirectional Video",
"360 Degree Video",
"Multi Metric Fusion"
],
"authors": [
{
"affiliation": "EPFL, LTS4,Lausanne,Switzerland",
"fullName": "Roberto G. de A. Azevedo",
"givenName": "Roberto G.",
"surname": "de A. Azevedo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "YouTube, Mountain View,CA,USA",
"fullName": "Neil Birkbeck",
"givenName": "Neil",
"surname": "Birkbeck",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "YouTube, Mountain View,CA,USA",
"fullName": "Ivan Janatra",
"givenName": "Ivan",
"surname": "Janatra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "YouTube, Mountain View,CA,USA",
"fullName": "Balu Adsumilli",
"givenName": "Balu",
"surname": "Adsumilli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "EPFL, LTS4,Lausanne,Switzerland",
"fullName": "Pascal Frossard",
"givenName": "Pascal",
"surname": "Frossard",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-1331-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09102923",
"articleId": "1kwrebHgxDW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09102958",
"articleId": "1kwqUmArJvy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2017/2937/0/2937a038",
"title": "A New Adaptation Approach for Viewport-adaptive 360-degree Video Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a038/12OmNwwd2MD",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019492",
"title": "Spherical domain rate-distortion optimization for 360-degree video coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019492/12OmNzEmFHn",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486584",
"title": "Spherical Structural Similarity Index for Objective Omnidirectional Video Quality Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486584/14jQfPogtSp",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551577",
"title": "Viewport-Driven Rate-Distortion Optimized Scalable Live 360° Video Network Multicast",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551577/17D45WZZ7Db",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/5555/01/09920013",
"title": "Could Head Motions Affect Quality When Viewing 360-Degree Videos?",
"doi": null,
"abstractUrl": "/magazine/mu/5555/01/09920013/1HxSle7FJHW",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0169",
"title": "Viewport Proposal CNN for 360° Video Quality Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0169/1gyrgYBrmpy",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/04/09212608",
"title": "Viewport-Based CNN: A Multi-Task Approach for Assessing 360° Video Quality",
"doi": null,
"abstractUrl": "/journal/tp/2022/04/09212608/1nG8VYgj7Ik",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ucc/2020/2394/0/239400a414",
"title": "Accuracy Analysis on 360° Virtual Reality Video Quality Assessment Methods",
"doi": null,
"abstractUrl": "/proceedings-article/ucc/2020/239400a414/1pZ0Z6h4ERq",
"parentPublication": {
"id": "proceedings/ucc/2020/2394/0",
"title": "2020 IEEE/ACM 13th International Conference on Utility and Cloud Computing (UCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a085",
"title": "On Subpicture-based Viewport-dependent 360-degree Video Streaming using VVC",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a085/1qBbHaCz3vG",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2021/9101/0/09333964",
"title": "Implementing Viewport Tile Extractor for Viewport-Adaptive 360-Degree Video Tiled Streaming",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2021/09333964/1qTrL1nfEyc",
"parentPublication": {
"id": "proceedings/icoin/2021/9101/0",
"title": "2021 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwGIcBW",
"title": "2016 Intl IEEE Conferences on Ubiquitous Intelligence & Computing, Advanced and Trusted Computing, Scalable Computing and Communications, Cloud and Big Data Computing, Internet of People, and Smart World Congress (UIC/ATC/ScalCom/CBDCom/IoP/SmartWorld)",
"acronym": "uic-atc-scalcom-cbdcom-iop-smartworld",
"groupId": "1002946",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzahccM",
"doi": "10.1109/UIC-ATC-ScalCom-CBDCom-IoP-SmartWorld.2016.0152",
"title": "Urban Knowledge Extraction, Representation and Reasoning as a Bridge from Data City towards Smart City",
"normalizedTitle": "Urban Knowledge Extraction, Representation and Reasoning as a Bridge from Data City towards Smart City",
"abstract": "Urban Data management represents a major challenge in the field of Smart Cities. Its understanding is essential for the development of better smart services, which are a persistent demand in urban policies. From all the sources of data available, those that involve a collective processing of urban information (by the citizens or other collectives) deliver in fact, useful insights into social perception. Such is the case, for example, of data collected from mobile networks. Prior to the design of sociotechnical artifacts in cities, it seems important to extract the qualitative, quantitative opinions, sentiment, feedbacks present in these data. In this paper we present three solutions for mining these contents through Knowledge Extraction methods, as a previous step to the prospection of new smart services.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Urban Data management represents a major challenge in the field of Smart Cities. Its understanding is essential for the development of better smart services, which are a persistent demand in urban policies. From all the sources of data available, those that involve a collective processing of urban information (by the citizens or other collectives) deliver in fact, useful insights into social perception. Such is the case, for example, of data collected from mobile networks. Prior to the design of sociotechnical artifacts in cities, it seems important to extract the qualitative, quantitative opinions, sentiment, feedbacks present in these data. In this paper we present three solutions for mining these contents through Knowledge Extraction methods, as a previous step to the prospection of new smart services.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Urban Data management represents a major challenge in the field of Smart Cities. Its understanding is essential for the development of better smart services, which are a persistent demand in urban policies. From all the sources of data available, those that involve a collective processing of urban information (by the citizens or other collectives) deliver in fact, useful insights into social perception. Such is the case, for example, of data collected from mobile networks. Prior to the design of sociotechnical artifacts in cities, it seems important to extract the qualitative, quantitative opinions, sentiment, feedbacks present in these data. In this paper we present three solutions for mining these contents through Knowledge Extraction methods, as a previous step to the prospection of new smart services.",
"fno": "07816948",
"keywords": [
"Urban Areas",
"Semantics",
"Data Mining",
"Feature Extraction",
"Cognition",
"Lattices",
"Ontologies",
"Formal Concept Analysis",
"Smart Cities"
],
"authors": [
{
"affiliation": null,
"fullName": "Jaime de Miguel-Rodríguez",
"givenName": "Jaime de",
"surname": "Miguel-Rodríguez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Juan Galán-Páez",
"givenName": "Juan",
"surname": "Galán-Páez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gonzalo A. Aranda-Corral",
"givenName": "Gonzalo A.",
"surname": "Aranda-Corral",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Joaquín Borrego-Díaz",
"givenName": "Joaquín",
"surname": "Borrego-Díaz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "uic-atc-scalcom-cbdcom-iop-smartworld",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-07-01T00:00:00",
"pubType": "proceedings",
"pages": "968-974",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-2771-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07816947",
"articleId": "12OmNxiKrYH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07816949",
"articleId": "12OmNx5GU7A",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/smartcomp/2018/4705/0/470501a381",
"title": "Reasoning about Smart City",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp/2018/470501a381/12OmNy5R3v7",
"parentPublication": {
"id": "proceedings/smartcomp/2018/4705/0",
"title": "2018 IEEE International Conference on Smart Computing (SMARTCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2017/02/mpc2017020044",
"title": "SenseCityVity: Mobile Crowdsourcing, Urban Awareness, and Collective Action in Mexico",
"doi": null,
"abstractUrl": "/magazine/pc/2017/02/mpc2017020044/13rRUIIVl9M",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2017/02/mpc2017020084",
"title": "What Can We Learn from Smart Urban Mobility Technologies?",
"doi": null,
"abstractUrl": "/magazine/pc/2017/02/mpc2017020084/13rRUNvgz1h",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2006/09/r9038",
"title": "Imagining the City: The Cultural Dimensions of Urban Computing",
"doi": null,
"abstractUrl": "/magazine/co/2006/09/r9038/13rRUwbs26q",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2014/10/mco2014100072",
"title": "China's Smart City Pilots: A Progress Report",
"doi": null,
"abstractUrl": "/magazine/co/2014/10/mco2014100072/13rRUyYSWnR",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2019/06/08896155",
"title": "Smart City: Technologies and Challenges",
"doi": null,
"abstractUrl": "/magazine/it/2019/06/08896155/1eS9TCrMvMQ",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2020/6034/0/603400a601",
"title": "KISTI Vehicle-Based Urban Sensing Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2020/603400a601/1jdDuBXTsWI",
"parentPublication": {
"id": "proceedings/bigcomp/2020/6034/0",
"title": "2020 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartcomp/2020/6997/0/699700a332",
"title": "Discovering Multi-density Urban Hotspots in a Smart City",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp/2020/699700a332/1oxo8AQD6yA",
"parentPublication": {
"id": "proceedings/smartcomp/2020/6997/0",
"title": "2020 IEEE International Conference on Smart Computing (SMARTCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2020/8666/0/866600a687",
"title": "Study on the Construction of Sponge City Based on the Background of Urban Waterlogging Control",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2020/866600a687/1wRIybsFooU",
"parentPublication": {
"id": "proceedings/icicta/2020/8666/0",
"title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2021/06/09655385",
"title": "Toward Trustworthy Urban IT Systems: The Bright and Dark Sides of Smart City Development",
"doi": null,
"abstractUrl": "/magazine/it/2021/06/09655385/1zpnKOV427m",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BYIuY5uWvS",
"title": "2022 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"acronym": "bigcomp",
"groupId": "1803439",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1BYICHFmoM0",
"doi": "10.1109/BigComp54360.2022.00052",
"title": "Urban Event Detection from Spatio-temporal IoT Sensor Data Using Graph-Based Machine Learning",
"normalizedTitle": "Urban Event Detection from Spatio-temporal IoT Sensor Data Using Graph-Based Machine Learning",
"abstract": "The accurate detection and handling of urban events such as emergency incidents are critical to improve the safety and convenience of people's life in urban environments. Recently, it has become possible to detect and handle urban events in an effective manner by analyzing the detailed urban data collected from Internet of Things (IoT) sensors. Especially, some recent works investigated the use of spatio-temporal sensor data for urban event detection. However, we found there is one challenge of having less accuracy of detecting urban events as the granularity of processing data in the spatial dimension becomes finer. To meet the challenge, we propose a novel graph-based approach that analyzes geo-spatial characteristics of urban sensor data over time to keep the accuracy of detecting urban anomaly in finer-grained geo-spatial scales, by identifying and exploiting regions that have abnormal urban dynamics. Through experiments using real-world urban datasets, we show our approach effectively addresses the challenge and outperforms the popular machine-learning-based urban event detection methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The accurate detection and handling of urban events such as emergency incidents are critical to improve the safety and convenience of people's life in urban environments. Recently, it has become possible to detect and handle urban events in an effective manner by analyzing the detailed urban data collected from Internet of Things (IoT) sensors. Especially, some recent works investigated the use of spatio-temporal sensor data for urban event detection. However, we found there is one challenge of having less accuracy of detecting urban events as the granularity of processing data in the spatial dimension becomes finer. To meet the challenge, we propose a novel graph-based approach that analyzes geo-spatial characteristics of urban sensor data over time to keep the accuracy of detecting urban anomaly in finer-grained geo-spatial scales, by identifying and exploiting regions that have abnormal urban dynamics. Through experiments using real-world urban datasets, we show our approach effectively addresses the challenge and outperforms the popular machine-learning-based urban event detection methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The accurate detection and handling of urban events such as emergency incidents are critical to improve the safety and convenience of people's life in urban environments. Recently, it has become possible to detect and handle urban events in an effective manner by analyzing the detailed urban data collected from Internet of Things (IoT) sensors. Especially, some recent works investigated the use of spatio-temporal sensor data for urban event detection. However, we found there is one challenge of having less accuracy of detecting urban events as the granularity of processing data in the spatial dimension becomes finer. To meet the challenge, we propose a novel graph-based approach that analyzes geo-spatial characteristics of urban sensor data over time to keep the accuracy of detecting urban anomaly in finer-grained geo-spatial scales, by identifying and exploiting regions that have abnormal urban dynamics. Through experiments using real-world urban datasets, we show our approach effectively addresses the challenge and outperforms the popular machine-learning-based urban event detection methods.",
"fno": "219700a234",
"keywords": [
"Geophysical Image Processing",
"Graph Theory",
"Internet Of Things",
"Learning Artificial Intelligence",
"Spatiotemporal Phenomena",
"Town And Country Planning",
"Abnormal Urban Dynamics",
"Real World Urban Datasets",
"Popular Machine Learning Based Urban Event Detection Methods",
"Spatio Temporal Io T Sensor Data",
"Graph Based Machine Learning",
"Urban Events",
"Urban Environments",
"Detailed Urban Data",
"Things Sensors",
"Spatio Temporal Sensor Data",
"Novel Graph Based Approach",
"Analyzes Geo Spatial Characteristics",
"Urban Sensor Data",
"Degradation",
"Event Detection",
"Urban Areas",
"Machine Learning",
"Sensor Phenomena And Characterization",
"Spatial Databases",
"Sensors",
"Urban Event Detection",
"Internet Of Things",
"Spatio Temporal Sensor Dataset",
"Machine Learning",
"Graph Centrality Indicator"
],
"authors": [
{
"affiliation": "Korea Advanced Institute of Science and Technology,School of Computing,Daejeon,Republic of Korea",
"fullName": "Dae-Young Park",
"givenName": "Dae-Young",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Korea Advanced Institute of Science and Technology,School of Computing,Daejeon,Republic of Korea",
"fullName": "In-Young Ko",
"givenName": "In-Young",
"surname": "Ko",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bigcomp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-01-01T00:00:00",
"pubType": "proceedings",
"pages": "234-241",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-2197-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "219700a226",
"articleId": "1BYIwtXCdwI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "219700a242",
"articleId": "1BYIGQu58d2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2016/8942/0/8942a282",
"title": "Word-Clouds in the Sky: Multi-layer Spatio-Temporal Event Visualization from a Geo-Parsed Microblog Stream",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a282/12OmNrAdssZ",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcsw/2009/3660/0/3660a044",
"title": "Spatio-Temporal Event Model for Cyber-Physical Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icdcsw/2009/3660a044/12OmNyY4rnY",
"parentPublication": {
"id": "proceedings/icdcsw/2009/3660/0",
"title": "2009 29th IEEE International Conference on Distributed Computing Systems Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2021/02/08476163",
"title": "BusBeat: Early Event Detection with Real-Time Bus GPS Trajectories",
"doi": null,
"abstractUrl": "/journal/bd/2021/02/08476163/13WBGTItFGX",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876004",
"title": "Using Topological Analysis to Support Event-Guided Exploration in Urban Data",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876004/13rRUx0xPZA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09793708",
"title": "Classification-Labeled Continuousization and Multi-Domain Spatio-Temporal Fusion for Fine-Grained Urban Crime Prediction",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09793708/1E5LzI2tveE",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/10107812",
"title": "Spatio-Temporal Dynamic Graph Relation Learning for Urban Metro Flow Prediction",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/10107812/1MDGjfriXp6",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2019/0858/0/09006284",
"title": "Spatio-temporal Event Detection using Poisson Model and Quad-tree on Geotagged Social Media",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2019/09006284/1hJsnmuu3NC",
"parentPublication": {
"id": "proceedings/big-data/2019/0858/0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/03/09096591",
"title": "Spatio-Temporal Meta Learning for Urban Traffic Prediction",
"doi": null,
"abstractUrl": "/journal/tk/2022/03/09096591/1jXqZCthRCM",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2020/4663/0/09162295",
"title": "A Big Data Platform For Spatio-Temporal Social Event Discovery",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2020/09162295/1m6hCVtMPVS",
"parentPublication": {
"id": "proceedings/mdm/2020/4663/0",
"title": "2020 21st IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2021/2845/0/284500a039",
"title": "Urban Crowd Density Prediction Based on Multi-relational Graph",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2021/284500a039/1v2QxX1aeSk",
"parentPublication": {
"id": "proceedings/mdm/2021/2845/0",
"title": "2021 22nd IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1hJrHq07uw0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"acronym": "big-data",
"groupId": "1802964",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hJrXYjoEWA",
"doi": "10.1109/BigData47090.2019.9006289",
"title": "Understanding Spatio-Temporal Urban Processes",
"normalizedTitle": "Understanding Spatio-Temporal Urban Processes",
"abstract": "Increasingly, decisions are based on insights and conclusions derived from the results of data analysis. Thus, determining the validity of these results is of paramount importance. In this paper, we take a step towards helping users identify potential issues in spatio-temporal data and thus gain trust in the results they derived from these data. We focus on processes that are captured by relationships among datasets that serve as the data exhaust for different components of urban environments. In this scenario, debugging data involves two important challenges: the inherent complexity of spatio-temporal data, and the number of possible relationships. We propose a framework for profiling spatio-temporal relationships that automatically identifies data slices that present a significant deviation from what is expected, and thus, helps focus a user's attention on slices of the data that may have quality issues and/or that may affect the conclusions derived from the analysis' results. We describe the profiling methodology and how it derives relationships, identifies candidate deviations, assesses their statistical significance, and measures their magnitude. We also present a series of cases studies using real datasets from New York City which demonstrate the usefulness of spatio-temporal profiling to build trust on data analysis' results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Increasingly, decisions are based on insights and conclusions derived from the results of data analysis. Thus, determining the validity of these results is of paramount importance. In this paper, we take a step towards helping users identify potential issues in spatio-temporal data and thus gain trust in the results they derived from these data. We focus on processes that are captured by relationships among datasets that serve as the data exhaust for different components of urban environments. In this scenario, debugging data involves two important challenges: the inherent complexity of spatio-temporal data, and the number of possible relationships. We propose a framework for profiling spatio-temporal relationships that automatically identifies data slices that present a significant deviation from what is expected, and thus, helps focus a user's attention on slices of the data that may have quality issues and/or that may affect the conclusions derived from the analysis' results. We describe the profiling methodology and how it derives relationships, identifies candidate deviations, assesses their statistical significance, and measures their magnitude. We also present a series of cases studies using real datasets from New York City which demonstrate the usefulness of spatio-temporal profiling to build trust on data analysis' results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Increasingly, decisions are based on insights and conclusions derived from the results of data analysis. Thus, determining the validity of these results is of paramount importance. In this paper, we take a step towards helping users identify potential issues in spatio-temporal data and thus gain trust in the results they derived from these data. We focus on processes that are captured by relationships among datasets that serve as the data exhaust for different components of urban environments. In this scenario, debugging data involves two important challenges: the inherent complexity of spatio-temporal data, and the number of possible relationships. We propose a framework for profiling spatio-temporal relationships that automatically identifies data slices that present a significant deviation from what is expected, and thus, helps focus a user's attention on slices of the data that may have quality issues and/or that may affect the conclusions derived from the analysis' results. We describe the profiling methodology and how it derives relationships, identifies candidate deviations, assesses their statistical significance, and measures their magnitude. We also present a series of cases studies using real datasets from New York City which demonstrate the usefulness of spatio-temporal profiling to build trust on data analysis' results.",
"fno": "09006289",
"keywords": [
"Data Analysis",
"Statistical Analysis",
"Trusted Computing",
"Urban Environments",
"Spatio Temporal Relationships",
"Data Slices",
"Spatio Temporal Profiling",
"Data Analysis",
"Spatio Temporal Urban Processes",
"Data Exhaust",
"Trust Building",
"Statistical Significance",
"Urban Areas",
"Correlation",
"Spatial Resolution",
"Data Analysis",
"Mathematical Model",
"Public Transportation",
"Standards",
"Data Quality",
"Data Profiling",
"Urban Data"
],
"authors": [
{
"affiliation": "Universidade Federal de Minas Gerais",
"fullName": "Lais M. A. Rocha",
"givenName": "Lais M. A.",
"surname": "Rocha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "New York University",
"fullName": "Aline Bessa",
"givenName": "Aline",
"surname": "Bessa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "New York University",
"fullName": "Fernando Chirigati",
"givenName": "Fernando",
"surname": "Chirigati",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "New York University",
"fullName": "Eugene OFriel",
"givenName": "Eugene",
"surname": "OFriel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidade Federal de Minas Gerais",
"fullName": "Mirella M. Moro",
"givenName": "Mirella M.",
"surname": "Moro",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "New York University",
"fullName": "Juliana Freire",
"givenName": "Juliana",
"surname": "Freire",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "big-data",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-12-01T00:00:00",
"pubType": "proceedings",
"pages": "563-572",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-0858-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09006217",
"articleId": "1hJrWcFpgPu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09006329",
"articleId": "1hJrNpRd5i8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/mdm/2016/0883/1/0883a318",
"title": "Understanding Urban Mobility via Taxi Trip Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2016/0883a318/12OmNzvQI7o",
"parentPublication": {
"id": "proceedings/mdm/2016/0883/1",
"title": "2016 17th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/05/mcg2018050026",
"title": "Spatio-Temporal Urban Data Analysis: A Visual Analytics Perspective",
"doi": null,
"abstractUrl": "/magazine/cg/2018/05/mcg2018050026/13WBGTItFGV",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2002/04/k0881",
"title": "Spatio-Temporal Predicates",
"doi": null,
"abstractUrl": "/journal/tk/2002/04/k0881/13rRUxAASTo",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08054703",
"title": "VAUD: A Visual Analysis Approach for Exploring Spatio-Temporal Urban Data",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08054703/13rRUxlgxOq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2020/11/08708929",
"title": "Understanding Urban Dynamics via Context-Aware Tensor Factorization with Neighboring Regularization",
"doi": null,
"abstractUrl": "/journal/tk/2020/11/08708929/19Q3gu5xNC0",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2022/5099/0/509900a879",
"title": "Origin-Destination Traffic Prediction based on Hybrid Spatio-Temporal Network",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2022/509900a879/1KpCBZggOS4",
"parentPublication": {
"id": "proceedings/icdm/2022/5099/0",
"title": "2022 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/10107812",
"title": "Spatio-Temporal Dynamic Graph Relation Learning for Urban Metro Flow Prediction",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/10107812/1MDGjfriXp6",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/03/09096591",
"title": "Spatio-Temporal Meta Learning for Urban Traffic Prediction",
"doi": null,
"abstractUrl": "/journal/tk/2022/03/09096591/1jXqZCthRCM",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/03/09086138",
"title": "Spatio-Temporal Capsule-Based Reinforcement Learning for Mobility-on-Demand Coordination",
"doi": null,
"abstractUrl": "/journal/tk/2022/03/09086138/1jyxqxDg00M",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2020/2903/0/09101507",
"title": "JUST: JD Urban Spatio-Temporal Data Engine",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2020/09101507/1kaMPqhYecM",
"parentPublication": {
"id": "proceedings/icde/2020/2903/0",
"title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |